Skip to content

Commit

Permalink
Merge branch 'dev' into #343_suggest_change_and_log
Browse files Browse the repository at this point in the history
  • Loading branch information
marcos-lg committed May 25, 2021
2 parents dfd7143 + abf373a commit ef02ec7
Show file tree
Hide file tree
Showing 7 changed files with 49 additions and 16 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -51,21 +51,23 @@ public class DatasetEsFieldMapper implements EsFieldMapper<DatasetSearchParamete
.put(DatasetSearchParameter.CONTINENT, "continent")
.put(DatasetSearchParameter.COUNTRY, "country")
.put(DatasetSearchParameter.PUBLISHING_COUNTRY, "publishingCountry")
.put(DatasetSearchParameter.PUBLISHING_ORG, "publishingOrganizationKey")
.put(DatasetSearchParameter.ENDORSING_NODE_KEY, "endorsingNodeKey")
.put(DatasetSearchParameter.YEAR, "year")
.put(DatasetSearchParameter.DECADE, "decade")
.put(DatasetSearchParameter.INSTALLATION_KEY, "installationKey")
.put(DatasetSearchParameter.HOSTING_ORG, "hostingOrganizationKey")
.put(DatasetSearchParameter.HOSTING_COUNTRY, "hostingCountry")
.put(DatasetSearchParameter.KEYWORD, "keyword")
.put(DatasetSearchParameter.LICENSE, "license")
.put(DatasetSearchParameter.MODIFIED_DATE, "modified")
.put(DatasetSearchParameter.PROJECT_ID, "project.identifier")
.put(DatasetSearchParameter.PUBLISHING_ORG, "publishingOrganizationKey")
.put(DatasetSearchParameter.RECORD_COUNT, "occurrenceCount")
.put(DatasetSearchParameter.SUBTYPE, "subtype")
.put(DatasetSearchParameter.TYPE, "type")
.put(DatasetSearchParameter.DATASET_TITLE, "title")
.put(DatasetSearchParameter.DOI, "doi")
.put(DatasetSearchParameter.NETWORK_KEY, "networkKeys")
.put(DatasetSearchParameter.INSTALLATION_KEY, "installationKey")
.put(DatasetSearchParameter.ENDPOINT_TYPE, "endpoints.type")
.build();

Expand Down Expand Up @@ -152,9 +154,11 @@ public String[] getMappedFields() {
"description",
"publishingOrganizationKey",
"publishingOrganizationTitle",
"publishingCountry",
"endorsingNodeKey",
"hostingOrganizationKey",
"hostingOrganizationTitle",
"publishingCountry",
"hostingCountry",
"license",
"projectId",
"nameUsagesCount",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,6 +216,9 @@ private void addTitles(ObjectNode dataset) {
dataset.put("hostingOrganizationKey", hostingOrg.getKey().toString());
dataset.put("hostingOrganizationTitle", hostingOrg.getTitle());
dataset.put("hostingOrganizationTitleAutocomplete", hostingOrg.getTitle());
if (Objects.nonNull(hostingOrg.getCountry())) {
dataset.put("hostingCountry", hostingOrg.getCountry().getIso2LetterCode());
}
}
}
}
Expand All @@ -229,6 +232,9 @@ private void addTitles(ObjectNode dataset) {
if (Objects.nonNull(publisher.getCountry())) {
dataset.put("publishingCountry", publisher.getCountry().getIso2LetterCode());
}
if (Objects.nonNull(publisher.getEndorsingNodeKey())) {
dataset.put("endorsingNodeKey", publisher.getEndorsingNodeKey().toString());
}
} else {
dataset.put("publishingCountry", Country.UNKNOWN.getIso2LetterCode());
}
Expand Down
2 changes: 2 additions & 0 deletions registry-search/src/main/resources/dataset-es-mapping.json
Original file line number Diff line number Diff line change
Expand Up @@ -143,9 +143,11 @@
"publishingOrganizationTitle": {"type": "text", "copy_to": "all"},
"publishingOrganizationTitleAutocomplete": {"type": "text", "analyzer": "autocomplete", "search_analyzer": "autocomplete_search"},
"publishingCountry": {"type": "keyword"},
"endorsingNodeKey": {"type": "keyword"},
"hostingOrganizationKey": {"type": "keyword"},
"hostingOrganizationTitle": {"type": "text", "copy_to": "all"},
"hostingOrganizationTitleAutocomplete": {"type": "text", "analyzer": "autocomplete", "search_analyzer": "autocomplete_search"},
"hostingCountry": {"type": "keyword"},
"installationTitle": {"type": "text", "copy_to": "all"},
"installationTitleAutocomplete": {"type": "text", "analyzer": "autocomplete", "search_analyzer": "autocomplete_search"},
"networkKeys": {"type": "keyword"},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
import org.gbif.api.model.common.export.ExportFormat;
import org.gbif.api.model.occurrence.DownloadStatistics;
import org.gbif.api.model.registry.search.DatasetSearchResult;
import org.gbif.api.vocabulary.Country;
import org.gbif.api.vocabulary.DatasetSubtype;
import org.gbif.api.vocabulary.DatasetType;
import org.gbif.api.vocabulary.License;
Expand Down Expand Up @@ -88,8 +89,8 @@ public static CsvWriter<DownloadStatistics> downloadStatisticsCsvWriter(Iterable
public static CsvWriter<DatasetSearchResult> datasetSearchResultCsvWriter(Iterable<DatasetSearchResult> pager,
ExportFormat preference) {
return CsvWriter.<DatasetSearchResult>builder()
.fields(new String[]{"key", "title", "doi", "license", "type", "subType", "hostingOrganizationKey", "hostingOrganizationTitle", "publishingOrganizationKey", "publishingOrganizationTitle","networkKeys", "projectIdentifier", "recordCount", "nameUsagesCount"})
.header(new String[]{"dataset_key", "title", "doi", "license", "type", "sub_type", "hosting_organization_Key", "hosting_organization_title", "publishing_organization_key", "publishing_organization_title","network_keys", "project_identifier", "occurrence_records_count", "name_usages_count"})
.fields(new String[]{"key", "title", "doi", "license", "type", "subType", "hostingOrganizationKey", "hostingOrganizationTitle", "hostingCountry", "publishingOrganizationKey", "publishingOrganizationTitle", "publishingCountry","endorsingNodeKey", "networkKeys", "projectIdentifier", "recordCount", "nameUsagesCount"})
.header(new String[]{"dataset_key", "title", "doi", "license", "type", "sub_type", "hosting_organization_Key", "hosting_organization_title", "hosting_country","publishing_organization_key", "publishing_organization_title", "publishing_country", "endorsing_node_key", "network_keys", "project_identifier", "occurrence_records_count", "name_usages_count"})
// "recordCount", "nameUsagesCount"
.processors(new CellProcessor[]{new UUIDProcessor(), //key
null, //title
Expand All @@ -99,12 +100,15 @@ public static CsvWriter<DatasetSearchResult> datasetSearchResultCsvWriter(Iterab
new Optional(new ParseEnum(DatasetSubtype.class)),//subType
new UUIDProcessor(), //hostingOrganizationKey
null, //hostingOrganizationTitle
new CountryProcessor(), //hostingCountry
new UUIDProcessor(), //publishingOrganizationKey
null, //publishingOrganizationTitle
new CountryProcessor(), //publishingCountry
new UUIDProcessor(), //endorsingNodeKey
new ListUUIDProcessor(), //networkKeys
null, //projectIdentifier
new ParseInt(), //recordCount
new ParseInt() //nameUsagesCount
new Optional(new ParseInt()), //recordCount
new Optional(new ParseInt()) //nameUsagesCount
})
.preference(preference)
.pager(pager)
Expand Down Expand Up @@ -141,4 +145,15 @@ public String execute(Object value, CsvContext csvContext) {
return value != null ? value.toString() : "";
}
}


/**
 * Cell processor that writes a {@link Country} value as its ISO 3166-1 alpha-2 code.
 * A {@code null} cell is rendered as an empty string rather than raising an error.
 */
private static class CountryProcessor implements CellProcessor {
  @Override
  public String execute(Object value, CsvContext csvContext) {
    if (value == null) {
      return "";
    }
    return ((Country) value).getIso2LetterCode();
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ public void search(HttpServletResponse response,
@RequestParam(value = "format", defaultValue = "TSV") ExportFormat format,
DatasetSearchRequest searchRequest) throws IOException {

String headerValue = String.format("attachment; filename=\"gbifdatasets.%s\"", format.name().toLowerCase());
String headerValue = "attachment; filename=gbif_datasets." + format.name().toLowerCase();
response.setHeader(HttpHeaders.CONTENT_DISPOSITION, headerValue);

CsvWriter.datasetSearchResultCsvWriter(Iterables.datasetSearchResults(searchRequest, searchService),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -386,8 +386,7 @@ public void getDownloadStatistics(
@RequestParam(value = "publishingOrgKey", required = false) UUID publishingOrgKey) throws
IOException {

String headerValue = String.format("attachment; filename=\"download_statistics.%s\"",
format.name().toLowerCase());
String headerValue = "attachment; filename=download_statistics." + format.name().toLowerCase();
response.setHeader(HttpHeaders.CONTENT_DISPOSITION, headerValue);


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import org.gbif.api.model.common.export.ExportFormat;
import org.gbif.api.model.occurrence.DownloadStatistics;
import org.gbif.api.model.registry.search.DatasetSearchResult;
import org.gbif.api.vocabulary.Country;
import org.gbif.api.vocabulary.DatasetSubtype;
import org.gbif.api.vocabulary.DatasetType;
import org.gbif.api.vocabulary.License;
Expand Down Expand Up @@ -93,8 +94,11 @@ private DatasetSearchResult newDatasetSearchResult(int consecutive){
datasetSearchResult.setSubtype(DatasetSubtype.DERIVED_FROM_OCCURRENCE);
datasetSearchResult.setHostingOrganizationKey(UUID.randomUUID());
datasetSearchResult.setHostingOrganizationTitle("HostingOrganizationTitle" + consecutive);
datasetSearchResult.setHostingCountry(Country.DENMARK);
datasetSearchResult.setPublishingOrganizationKey(UUID.randomUUID());
datasetSearchResult.setPublishingOrganizationTitle("PublishingOrganizationTitle" + consecutive);
datasetSearchResult.setPublishingCountry(Country.COSTA_RICA);
datasetSearchResult.setEndorsingNodeKey(UUID.randomUUID());
datasetSearchResult.setNetworkKeys(Arrays.asList(UUID.randomUUID(), UUID.randomUUID()));
datasetSearchResult.setProjectIdentifier("project" + consecutive);
datasetSearchResult.setRecordCount(consecutive);
Expand All @@ -115,16 +119,19 @@ private void assertDatasetSearchResult(DatasetSearchResult datasetSearchResult,
assertEquals(datasetSearchResult.getSubtype().name(), line[5]);
assertEquals(datasetSearchResult.getHostingOrganizationKey().toString(), line[6]);
assertEquals(datasetSearchResult.getHostingOrganizationTitle(), line[7]);
assertEquals(datasetSearchResult.getPublishingOrganizationKey().toString(), line[8]);
assertEquals(datasetSearchResult.getPublishingOrganizationTitle(), line[9]);
assertEquals(datasetSearchResult.getHostingCountry().getIso2LetterCode(), line[8]);
assertEquals(datasetSearchResult.getPublishingOrganizationKey().toString(), line[9]);
assertEquals(datasetSearchResult.getPublishingOrganizationTitle(), line[10]);
assertEquals(datasetSearchResult.getPublishingCountry().getIso2LetterCode(), line[11]);
assertEquals(datasetSearchResult.getEndorsingNodeKey().toString(), line[12]);
assertTrue(datasetSearchResult.getNetworkKeys()
.containsAll(Arrays.stream(line[10].split(CsvWriter.ARRAY_DELIMITER))
.containsAll(Arrays.stream(line[13].split(CsvWriter.ARRAY_DELIMITER))
.map(UUID::fromString)
.collect(Collectors.toList())));
assertEquals(datasetSearchResult.getProjectIdentifier(), line[11]);
assertEquals(datasetSearchResult.getRecordCount(), Integer.parseInt(line[12]));
assertEquals(datasetSearchResult.getProjectIdentifier(), line[14]);
assertEquals(datasetSearchResult.getRecordCount(), Integer.parseInt(line[15]));
//Last characters has carriage return \r
assertEquals(datasetSearchResult.getNameUsagesCount(), Integer.parseInt(line[13].replace("\r","")));
assertEquals(datasetSearchResult.getNameUsagesCount(), Integer.parseInt(line[16].replace("\r","")));
}

@Test
Expand Down

0 comments on commit ef02ec7

Please sign in to comment.