From fc699c630f95392b47cf9a780a16ba24c1cc0601 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Fri, 13 Jul 2018 13:07:26 -0700 Subject: [PATCH 01/13] Clean Up Snapshot Create Rest API (#31779) Make SnapshotInfo and CreateSnapshotResponse parsers lenient for backwards compatibility. Remove extraneous fields from CreateSnapshotRequest toXContent. --- .../elasticsearch/client/SnapshotClient.java | 4 +- .../org/elasticsearch/client/SnapshotIT.java | 4 +- .../SnapshotClientDocumentationIT.java | 10 +++- .../snapshot/create_snapshot.asciidoc | 11 ++++ .../create/CreateSnapshotRequest.java | 4 +- .../create/CreateSnapshotResponse.java | 50 ++++++------------- .../action/support/IndicesOptions.java | 28 +++++------ .../elasticsearch/snapshots/SnapshotInfo.java | 25 ---------- .../create/CreateSnapshotRequestTests.java | 4 +- .../create/CreateSnapshotResponseTests.java | 6 +-- .../action/support/IndicesOptionsTests.java | 3 -- 11 files changed, 56 insertions(+), 93 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index fa147a338de0a..785469673747c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -174,7 +174,7 @@ public void verifyRepositoryAsync(VerifyRepositoryRequest verifyRepositoryReques * See Snapshot and Restore * API on elastic.co */ - public CreateSnapshotResponse createSnapshot(CreateSnapshotRequest createSnapshotRequest, RequestOptions options) + public CreateSnapshotResponse create(CreateSnapshotRequest createSnapshotRequest, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options, CreateSnapshotResponse::fromXContent, emptySet()); @@ -186,7 +186,7 @@ public CreateSnapshotResponse createSnapshot(CreateSnapshotRequest createSnapsho * See Snapshot and Restore * API on elastic.co */ - public void createSnapshotAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options, + public void createAsync(CreateSnapshotRequest createSnapshotRequest, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(createSnapshotRequest, RequestConverters::createSnapshot, options, CreateSnapshotResponse::fromXContent, listener, emptySet()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 7ec2ee80f04ac..5dd288e4398d7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -57,8 +57,8 @@ private PutRepositoryResponse createTestRepository(String repository, String typ private CreateSnapshotResponse createTestSnapshot(CreateSnapshotRequest createSnapshotRequest) throws IOException { // assumes the repository already exists - return execute(createSnapshotRequest, highLevelClient().snapshot()::createSnapshot, - highLevelClient().snapshot()::createSnapshotAsync); + return execute(createSnapshotRequest, highLevelClient().snapshot()::create, + highLevelClient().snapshot()::createAsync); } public void testCreateRepository() throws IOException { diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 48d01963e236d..d454b04718382 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -420,7 +420,7 @@ public void testSnapshotCreate() throws IOException { // end::create-snapshot-request-waitForCompletion // tag::create-snapshot-execute - CreateSnapshotResponse response = client.snapshot().createSnapshot(request, RequestOptions.DEFAULT); + CreateSnapshotResponse response = client.snapshot().create(request, RequestOptions.DEFAULT); // end::create-snapshot-execute // tag::create-snapshot-response @@ -428,6 +428,12 @@ public void testSnapshotCreate() throws IOException { // end::create-snapshot-response assertEquals(RestStatus.OK, status); + + // tag::create-snapshot-response-snapshot-info + SnapshotInfo snapshotInfo = response.getSnapshotInfo(); // <1> + // end::create-snapshot-response-snapshot-info + + assertNotNull(snapshotInfo); } public void testSnapshotCreateAsync() throws InterruptedException { @@ -455,7 +461,7 @@ public void onFailure(Exception exception) { listener = new LatchedActionListener<>(listener, latch); // tag::create-snapshot-execute-async - client.snapshot().createSnapshotAsync(request, RequestOptions.DEFAULT, listener); // <1> + client.snapshot().createAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::create-snapshot-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); diff --git a/docs/java-rest/high-level/snapshot/create_snapshot.asciidoc b/docs/java-rest/high-level/snapshot/create_snapshot.asciidoc index dbd31380a9b4b..971a6ee486711 100644 --- a/docs/java-rest/high-level/snapshot/create_snapshot.asciidoc +++ b/docs/java-rest/high-level/snapshot/create_snapshot.asciidoc @@ -73,11 +73,22 @@ include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-r [[java-rest-high-snapshot-create-snapshot-sync]] ==== Synchronous Execution +Execute a `CreateSnapshotRequest` synchronously to receive a `CreateSnapshotResponse`. + ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-execute] -------------------------------------------------- +Retrieve the `SnapshotInfo` from a `CreateSnapshotResponse` when the snapshot is fully created. +(The `waitForCompletion` parameter is `true`). + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-snapshot-response-snapshot-info] +-------------------------------------------------- +<1> The `SnapshotInfo` object. 
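A minimal end-to-end sketch of the synchronous flow (the repository and snapshot names below are placeholders; the calls mirror the `create-snapshot-execute` and `create-snapshot-response-snapshot-info` snippets tagged above):

["source","java"]
--------------------------------------------------
// assumes a repository named "my_repository" already exists (placeholder names)
CreateSnapshotRequest request = new CreateSnapshotRequest("my_repository", "my_snapshot");
request.waitForCompletion(true); // so the response carries a SnapshotInfo
CreateSnapshotResponse response = client.snapshot().create(request, RequestOptions.DEFAULT);
SnapshotInfo snapshotInfo = response.getSnapshotInfo();
--------------------------------------------------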
+ [[java-rest-high-snapshot-create-snapshot-async]] ==== Asynchronous Execution diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index cd4923704ff35..fda45eed2f310 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -42,9 +42,9 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; import static org.elasticsearch.common.Strings.EMPTY_ARRAY; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** @@ -408,8 +408,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (indicesOptions != null) { indicesOptions.toXContent(builder, params); } - builder.field("wait_for_completion", waitForCompletion); - builder.field("master_node_timeout", masterNodeTimeout.toString()); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java index a2dc02c5c8299..d0a82e36a97da 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponse.java @@ -21,14 +21,16 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.snapshots.SnapshotInfo.SnapshotInfoBuilder; import java.io.IOException; import java.util.Objects; @@ -38,6 +40,14 @@ */ public class CreateSnapshotResponse extends ActionResponse implements ToXContentObject { + private static final ObjectParser PARSER = + new ObjectParser<>(CreateSnapshotResponse.class.getName(), true, CreateSnapshotResponse::new); + + static { + PARSER.declareObject(CreateSnapshotResponse::setSnapshotInfoFromBuilder, + SnapshotInfo.SNAPSHOT_INFO_PARSER, new ParseField("snapshot")); + } + @Nullable private SnapshotInfo snapshotInfo; @@ -48,8 +58,8 @@ public class CreateSnapshotResponse extends ActionResponse implements ToXContent CreateSnapshotResponse() { } - void setSnapshotInfo(SnapshotInfo snapshotInfo) { - this.snapshotInfo = snapshotInfo; + private void setSnapshotInfoFromBuilder(SnapshotInfoBuilder snapshotInfoBuilder) { + this.snapshotInfo = 
snapshotInfoBuilder.build(); } /** @@ -101,38 +111,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public static CreateSnapshotResponse fromXContent(XContentParser parser) throws IOException { - CreateSnapshotResponse createSnapshotResponse = new CreateSnapshotResponse(); - - parser.nextToken(); // move to '{' - - if (parser.currentToken() != Token.START_OBJECT) { - throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "], expected ['{']"); - } - - parser.nextToken(); // move to 'snapshot' || 'accepted' - - if ("snapshot".equals(parser.currentName())) { - createSnapshotResponse.snapshotInfo = SnapshotInfo.fromXContent(parser); - } else if ("accepted".equals(parser.currentName())) { - parser.nextToken(); // move to 'accepted' field value - - if (parser.booleanValue()) { - // ensure accepted is a boolean value - } - - parser.nextToken(); // move past 'true'/'false' - } else { - throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "] expected ['snapshot', 'accepted']"); - } - - if (parser.currentToken() != Token.END_OBJECT) { - throw new IllegalArgumentException("unexpected token [" + parser.currentToken() + "], expected ['}']"); - } - - parser.nextToken(); // move past '}' - - return createSnapshotResponse; + public static CreateSnapshotResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index 7f4becc2d9afc..c707fed6ddf7b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -22,6 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestRequest; @@ -322,21 +323,6 @@ public static IndicesOptions fromMap(Map map, IndicesOptions def defaultSettings); } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startArray("expand_wildcards"); - for (WildcardStates expandWildcard : expandWildcards) { - builder.value(expandWildcard.toString().toLowerCase(Locale.ROOT)); - } - builder.endArray(); - builder.field("ignore_unavailable", ignoreUnavailable()); - builder.field("allow_no_indices", allowNoIndices()); - builder.field("forbid_aliases_to_multiple_indices", allowAliasesToMultipleIndices() == false); - builder.field("forbid_closed_indices", forbidClosedIndices()); - builder.field("ignore_aliases", ignoreAliases()); - return builder; - } - /** * Returns true if the name represents a valid name for one of the indices option * false otherwise @@ -366,6 +352,18 @@ public static IndicesOptions fromParameters(Object wildcardsString, Object ignor ); } + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray("expand_wildcards"); + for (WildcardStates expandWildcard : expandWildcards) { + builder.value(expandWildcard.toString().toLowerCase(Locale.ROOT)); + } + builder.endArray(); + builder.field("ignore_unavailable", 
ignoreUnavailable()); + builder.field("allow_no_indices", allowNoIndices()); + return builder; + } + /** * @return indices options that requires every specified index to exist, expands wildcards only to open indices and * allows that no indices are resolved from wildcard expressions (not returning an error). diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index a1f56a1e47376..bf3d337c49ec5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -138,22 +138,6 @@ private void setShardFailures(List shardFailures) { this.shardFailures = shardFailures; } - private void ignoreVersion(String version) { - // ignore extra field - } - - private void ignoreStartTime(String startTime) { - // ignore extra field - } - - private void ignoreEndTime(String endTime) { - // ignore extra field - } - - private void ignoreDurationInMillis(long durationInMillis) { - // ignore extra field - } - public SnapshotInfo build() { SnapshotId snapshotId = new SnapshotId(snapshotName, snapshotUUID); @@ -195,10 +179,6 @@ private void setSuccessfulShards(int successfulShards) { int getSuccessfulShards() { return successfulShards; } - - private void ignoreFailedShards(int failedShards) { - // ignore extra field - } } public static final ObjectParser SNAPSHOT_INFO_PARSER = @@ -220,14 +200,9 @@ private void ignoreFailedShards(int failedShards) { SNAPSHOT_INFO_PARSER.declareInt(SnapshotInfoBuilder::setVersion, new ParseField(VERSION_ID)); SNAPSHOT_INFO_PARSER.declareObjectArray(SnapshotInfoBuilder::setShardFailures, SnapshotShardFailure.SNAPSHOT_SHARD_FAILURE_PARSER, new ParseField(FAILURES)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreVersion, new ParseField(VERSION)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreStartTime, new ParseField(START_TIME)); - SNAPSHOT_INFO_PARSER.declareString(SnapshotInfoBuilder::ignoreEndTime, new ParseField(END_TIME)); - SNAPSHOT_INFO_PARSER.declareLong(SnapshotInfoBuilder::ignoreDurationInMillis, new ParseField(DURATION_IN_MILLIS)); SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setTotalShards, new ParseField(TOTAL)); SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::setSuccessfulShards, new ParseField(SUCCESSFUL)); - SHARD_STATS_PARSER.declareInt(ShardStatsBuilder::ignoreFailedShards, new ParseField(FAILED)); } private final SnapshotId snapshotId; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 1bde8ab572b72..0b598be6849cb 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -102,8 +102,8 @@ public void testToXContent() throws IOException { NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); Map map = parser.mapOrdered(); CreateSnapshotRequest processed = new CreateSnapshotRequest((String)map.get("repository"), (String)map.get("snapshot")); - processed.waitForCompletion((boolean)map.getOrDefault("wait_for_completion", false)); - processed.masterNodeTimeout((String)map.getOrDefault("master_node_timeout", "30s")); + 
processed.waitForCompletion(original.waitForCompletion()); + processed.masterNodeTimeout(original.masterNodeTimeout()); processed.source(map); assertEquals(original, processed); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java index bbfc9755bf215..bbb11fc6feef0 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotResponseTests.java @@ -40,7 +40,7 @@ protected CreateSnapshotResponse doParseInstance(XContentParser parser) throws I @Override protected boolean supportsUnknownFields() { - return false; + return true; } @Override @@ -63,9 +63,7 @@ protected CreateSnapshotResponse createTestInstance() { boolean globalState = randomBoolean(); - CreateSnapshotResponse response = new CreateSnapshotResponse(); - response.setSnapshotInfo( + return new CreateSnapshotResponse( new SnapshotInfo(snapshotId, indices, startTime, reason, endTime, totalShards, shardFailures, globalState)); - return response; } } diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 84904107c2316..e36fe90ea92bb 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -324,8 +324,5 @@ public void testToXContent() throws IOException { } assertEquals(map.get("ignore_unavailable"), options.contains(Option.IGNORE_UNAVAILABLE)); assertEquals(map.get("allow_no_indices"), options.contains(Option.ALLOW_NO_INDICES)); - assertEquals(map.get("forbid_aliases_to_multiple_indices"), options.contains(Option.FORBID_ALIASES_TO_MULTIPLE_INDICES)); - assertEquals(map.get("forbid_closed_indices"), options.contains(Option.FORBID_CLOSED_INDICES)); - assertEquals(map.get("ignore_aliases"), options.contains(Option.IGNORE_ALIASES)); } } From ba8a54aded09e8613e5d12f8e83e8bcb5841e01b Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Fri, 13 Jul 2018 09:33:27 -0700 Subject: [PATCH 02/13] HLRC: Add xpack usage api (#31975) This commit adds the _xpack/usage api to the high level rest client. Currently in the transport api, the usage data is exposed in a limited fashion, at most giving one level of helper methods for the inner keys of data, but then exposing those subobjects as maps of objects. Rather than making parsers for every set of usage data from each feature, this PR exposes the entire set of usage data as a map of maps.
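For illustration, a short sketch of how a caller might use the new client method (mirroring the documentation test added in this commit; the "monitoring" feature key is just one example of what the map may contain):

    XPackUsageRequest request = new XPackUsageRequest();
    XPackUsageResponse response = client.xpack().usage(request, RequestOptions.DEFAULT);
    Map<String, Map<String, Object>> usages = response.getUsages();
    Map<String, Object> monitoringUsage = usages.get("monitoring");
    boolean monitoringEnabled = (boolean) monitoringUsage.get("enabled");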
--- .../client/RequestConverters.java | 8 +++ .../org/elasticsearch/client/XPackClient.java | 23 ++++++++ .../MiscellaneousDocumentationIT.java | 49 ++++++++++++++++ .../miscellaneous/x-pack-usage.asciidoc | 54 ++++++++++++++++++ .../action/TransportXPackUsageAction.java | 1 + .../xpack/core/action/XPackUsageAction.java | 1 + .../xpack/core/action/XPackUsageRequest.java | 18 ------ .../core/action/XPackUsageRequestBuilder.java | 1 + .../cluster/ClusterStatsCollectorTests.java | 2 +- .../watcher/WatcherXpackUsageStatsTests.java | 2 +- .../protocol/xpack/XPackUsageRequest.java | 31 ++++++++++ .../protocol/xpack/XPackUsageResponse.java | 57 +++++++++++++++++++ 12 files changed, 227 insertions(+), 20 deletions(-) create mode 100644 docs/java-rest/high-level/miscellaneous/x-pack-usage.asciidoc delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java create mode 100644 x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index de2ac130f79e1..da21f6d5ecd77 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -105,6 +105,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.script.mustache.MultiSearchTemplateRequest; import org.elasticsearch.script.mustache.SearchTemplateRequest; @@ -1092,6 +1093,13 @@ static Request xPackInfo(XPackInfoRequest infoRequest) { return request; } + static Request xpackUsage(XPackUsageRequest usageRequest) { + Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/usage"); + Params parameters = new Params(request); + parameters.withMasterTimeout(usageRequest.masterNodeTimeout()); + return request; + } + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java index 5942bfa35a477..a497619b987bd 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.protocol.xpack.XPackUsageResponse; import java.io.IOException; @@ -70,4 +72,25 @@ public void infoAsync(XPackInfoRequest request, RequestOptions options, restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xPackInfo, options, 
XPackInfoResponse::fromXContent, listener, emptySet()); } + + /** + * Fetch usage information about X-Pack features from the cluster. + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public XPackUsageResponse usage(XPackUsageRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::xpackUsage, options, + XPackUsageResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously fetch usage information about X-Pack features from the cluster. + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void usageAsync(XPackUsageRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xpackUsage, options, + XPackUsageResponse::fromXContent, listener, emptySet()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java index 639a5ce7ceef6..f27949e05f909 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MiscellaneousDocumentationIT.java @@ -35,12 +35,17 @@ import org.elasticsearch.protocol.xpack.XPackInfoResponse.BuildInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.FeatureSetsInfo; import org.elasticsearch.protocol.xpack.XPackInfoResponse.LicenseInfo; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.protocol.xpack.XPackUsageResponse; import java.io.IOException; import java.util.EnumSet; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.is; + /** * Documentation for miscellaneous APIs in the high level java client. * Code wrapped in {@code tag} and {@code end} tags is included in the docs. 
@@ -130,6 +135,50 @@ public void onFailure(Exception e) { } } + public void testXPackUsage() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + //tag::x-pack-usage-execute + XPackUsageRequest request = new XPackUsageRequest(); + XPackUsageResponse response = client.xpack().usage(request, RequestOptions.DEFAULT); + //end::x-pack-usage-execute + + //tag::x-pack-usage-response + Map> usages = response.getUsages(); + Map monitoringUsage = usages.get("monitoring"); + assertThat(monitoringUsage.get("available"), is(true)); + assertThat(monitoringUsage.get("enabled"), is(true)); + assertThat(monitoringUsage.get("collection_enabled"), is(false)); + //end::x-pack-usage-response + } + { + XPackUsageRequest request = new XPackUsageRequest(); + // tag::x-pack-usage-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(XPackUsageResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::x-pack-usage-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::x-pack-usage-execute-async + client.xpack().usageAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::x-pack-usage-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testInitializationFromClientBuilder() throws IOException { //tag::rest-high-level-client-init RestHighLevelClient client = new RestHighLevelClient( diff --git a/docs/java-rest/high-level/miscellaneous/x-pack-usage.asciidoc b/docs/java-rest/high-level/miscellaneous/x-pack-usage.asciidoc new file mode 100644 index 0000000000000..0927ae71c0bf5 --- /dev/null +++ b/docs/java-rest/high-level/miscellaneous/x-pack-usage.asciidoc @@ -0,0 +1,54 @@ +[[java-rest-high-x-pack-usage]] +=== X-Pack Usage API + +[[java-rest-high-x-pack-usage-execution]] +==== Execution + +Detailed information about the usage of features from {xpack} can be +retrieved using the `usage()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute] +-------------------------------------------------- + +[[java-rest-high-x-pack-info-response]] +==== Response + +The returned `XPackUsageResponse` contains a `Map` keyed by feature name. +Every feature map has an `available` key, indicating whether that +feature is available given the current license, and an `enabled` key, +indicating whether that feature is currently enabled. Other keys +are specific to each feature. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-response] +-------------------------------------------------- + +[[java-rest-high-x-pack-usage-async]] +==== Asynchronous Execution + +This request can be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute-async] +-------------------------------------------------- +<1> The call to execute the usage api and the `ActionListener` to use when +the execution completes + +The asynchronous method does not block and returns immediately. 
Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `XPackUsageResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[x-pack-usage-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of failure. The raised exception is provided as an argument diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java index f3abad5e68bb3..6b7d5b96d2024 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportXPackUsageAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackFeatureSet; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java index 252283a1dfc7d..87033eac21aee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageAction.java @@ -7,6 +7,7 @@ import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; public class XPackUsageAction extends Action { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java deleted file mode 100644 index d578249c147c3..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequest.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.action; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.master.MasterNodeRequest; - -public class XPackUsageRequest extends MasterNodeRequest { - - @Override - public ActionRequestValidationException validate() { - return null; - } - -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java index 789460f133969..92c2ba75ec170 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageRequestBuilder.java @@ -7,6 +7,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; public class XPackUsageRequestBuilder extends MasterNodeOperationRequestBuilder { diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java index 6784b00361bc1..49355d51495ec 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsCollectorTests.java @@ -26,7 +26,7 @@ import org.elasticsearch.xpack.core.XPackField; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.action.XPackUsageAction; -import org.elasticsearch.xpack.core.action.XPackUsageRequest; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.monitoring.MonitoredSystem; import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java index 3a314640d742a..7c07c98eb4725 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherXpackUsageStatsTests.java @@ -7,7 +7,7 @@ import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.action.XPackUsageAction; -import org.elasticsearch.xpack.core.action.XPackUsageRequest; +import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage; import org.elasticsearch.xpack.watcher.test.AbstractWatcherIntegrationTestCase; diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java new file mode 100644 index 0000000000000..f5f6d9d949b7f --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageRequest.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; + +public class XPackUsageRequest extends MasterNodeRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java new file mode 100644 index 0000000000000..3459403bd6124 --- /dev/null +++ b/x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/XPackUsageResponse.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.protocol.xpack; + +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Response object from calling the xpack usage api. + * + * Usage information for each feature is accessible through {@link #getUsages()}. + */ +public class XPackUsageResponse { + + private final Map> usages; + + private XPackUsageResponse(Map> usages) throws IOException { + this.usages = usages; + } + + @SuppressWarnings("unchecked") + private static Map castMap(Object value) { + return (Map)value; + } + + /** Return a map from feature name to usage information for that feature. 
*/ + public Map> getUsages() { + return usages; + } + + public static XPackUsageResponse fromXContent(XContentParser parser) throws IOException { + Map rawMap = parser.map(); + Map> usages = rawMap.entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> castMap(e.getValue()))); + return new XPackUsageResponse(usages); + } +} From be06fba821d20ce516ceb258948e4972cd771520 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sun, 15 Jul 2018 08:45:41 +0200 Subject: [PATCH 03/13] Cleanup Duplication in `PainlessScriptEngine` (#31991) (#32061) * Cleanup Duplication in `PainlessScriptEngine` * Extract duplicate building of compiler settings to method * Remove dead method params + dead constant in `ScriptProcessor` --- .../painless/PainlessScriptEngine.java | 85 ++++++------------- 1 file changed, 26 insertions(+), 59 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index ae1944c9bd3a9..4560fd85a6589 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -366,44 +366,7 @@ private void writeNeedsMethods(Class clazz, ClassWriter writer, MainMethodRes } Object compile(Compiler compiler, String scriptName, String source, Map params, Object... args) { - final CompilerSettings compilerSettings; - - if (params.isEmpty()) { - // Use the default settings. - compilerSettings = defaultCompilerSettings; - } else { - // Use custom settings specified by params. - compilerSettings = new CompilerSettings(); - - // Except regexes enabled - this is a node level setting and can't be changed in the request. - compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled()); - - Map copy = new HashMap<>(params); - - String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER); - if (value != null) { - compilerSettings.setMaxLoopCounter(Integer.parseInt(value)); - } - - value = copy.remove(CompilerSettings.PICKY); - if (value != null) { - compilerSettings.setPicky(Boolean.parseBoolean(value)); - } - - value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH); - if (value != null) { - compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value)); - } - - value = copy.remove(CompilerSettings.REGEX_ENABLED.getKey()); - if (value != null) { - throw new IllegalArgumentException("[painless.regex.enabled] can only be set on node startup."); - } - - if (!copy.isEmpty()) { - throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy); - } - } + final CompilerSettings compilerSettings = buildCompilerSettings(params); // Check we ourselves are not being called by unprivileged code. SpecialPermission.check(); @@ -434,14 +397,33 @@ public Object run() { }, COMPILATION_CONTEXT); // Note that it is safe to catch any of the following errors since Painless is stateless. } catch (OutOfMemoryError | StackOverflowError | VerifyError | Exception e) { - throw convertToScriptException(scriptName == null ? 
source : scriptName, source, e); + throw convertToScriptException(source, e); } } void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, String scriptName, String source, Map params) { - final CompilerSettings compilerSettings; + final CompilerSettings compilerSettings = buildCompilerSettings(params); + + try { + // Drop all permissions to actually compile the code itself. + AccessController.doPrivileged(new PrivilegedAction() { + @Override + public Void run() { + String name = scriptName == null ? source : scriptName; + compiler.compile(loader, reserved, name, source, compilerSettings); + + return null; + } + }, COMPILATION_CONTEXT); + // Note that it is safe to catch any of the following errors since Painless is stateless. + } catch (OutOfMemoryError | StackOverflowError | VerifyError | Exception e) { + throw convertToScriptException(source, e); + } + } + private CompilerSettings buildCompilerSettings(Map params) { + CompilerSettings compilerSettings; if (params.isEmpty()) { // Use the default settings. compilerSettings = defaultCompilerSettings; @@ -478,25 +460,10 @@ void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy); } } - - try { - // Drop all permissions to actually compile the code itself. - AccessController.doPrivileged(new PrivilegedAction() { - @Override - public Void run() { - String name = scriptName == null ? source : scriptName; - compiler.compile(loader, reserved, name, source, compilerSettings); - - return null; - } - }, COMPILATION_CONTEXT); - // Note that it is safe to catch any of the following errors since Painless is stateless. - } catch (OutOfMemoryError | StackOverflowError | VerifyError | Exception e) { - throw convertToScriptException(scriptName == null ? source : scriptName, source, e); - } + return compilerSettings; } - private ScriptException convertToScriptException(String scriptName, String scriptSource, Throwable t) { + private ScriptException convertToScriptException(String scriptSource, Throwable t) { // create a script stack: this is just the script portion List scriptStack = new ArrayList<>(); for (StackTraceElement element : t.getStackTrace()) { @@ -507,7 +474,7 @@ private ScriptException convertToScriptException(String scriptName, String scrip scriptStack.add("<<< unknown portion of script >>>"); } else { offset--; // offset is 1 based, line numbers must be! - int startOffset = getPreviousStatement(scriptSource, offset); + int startOffset = getPreviousStatement(offset); int endOffset = getNextStatement(scriptSource, offset); StringBuilder snippet = new StringBuilder(); if (startOffset > 0) { @@ -535,7 +502,7 @@ private ScriptException convertToScriptException(String scriptName, String scrip } // very simple heuristic: +/- 25 chars. can be improved later. 
- private int getPreviousStatement(String scriptSource, int offset) { + private int getPreviousStatement(int offset) { return Math.max(0, offset - 25); } From 1639aab300384d4d716a27ef4b3d86a00bacfdfb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Sun, 15 Jul 2018 08:46:00 +0200 Subject: [PATCH 04/13] Replace Ingest ScriptContext with Custom Interface (#32003) (#32060) * Replace Ingest ScriptContext with Custom Interface * Make org.elasticsearch.ingest.common.ScriptProcessorTests#testScripting more precise * Don't mock script factory in ScriptProcessorTests * Adjust mock script plugin in IT for new API --- .../ingest/common/ScriptProcessor.java | 10 ++-- .../ingest/common/IngestRestartIT.java | 4 +- .../ingest/common/ScriptProcessorTests.java | 38 ++++++++------ .../script/ExecutableScript.java | 1 - .../elasticsearch/script/IngestScript.java | 52 +++++++++++++++++++ .../elasticsearch/script/ScriptModule.java | 2 +- .../script/ScriptServiceTests.java | 8 +-- .../script/MockScriptEngine.java | 8 +++ 8 files changed, 91 insertions(+), 32 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/script/IngestScript.java diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index ad574115208da..13ec666381529 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -26,7 +26,7 @@ import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.Processor; -import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.IngestScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.ScriptService; @@ -71,10 +71,8 @@ public final class ScriptProcessor extends AbstractProcessor { */ @Override public void execute(IngestDocument document) { - ExecutableScript.Factory factory = scriptService.compile(script, ExecutableScript.INGEST_CONTEXT); - ExecutableScript executableScript = factory.newInstance(script.getParams()); - executableScript.setNextVar("ctx", document.getSourceAndMetadata()); - executableScript.run(); + IngestScript.Factory factory = scriptService.compile(script, IngestScript.CONTEXT); + factory.newInstance(script.getParams()).execute(document.getSourceAndMetadata()); } @Override @@ -144,7 +142,7 @@ public ScriptProcessor create(Map registry, String pr // verify script is able to be compiled before successfully creating processor. 
try { - scriptService.compile(script, ExecutableScript.INGEST_CONTEXT); + scriptService.compile(script, IngestScript.CONTEXT); } catch (ScriptException e) { throw newConfigurationException(TYPE, processorTag, scriptPropertyUsed, e); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 9658637f16444..8c3976d2b175c 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -58,9 +58,7 @@ protected boolean ignoreExternalCluster() { public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map, Object>> pluginScripts() { - return Collections.singletonMap("my_script", script -> { - @SuppressWarnings("unchecked") - Map ctx = (Map) script.get("ctx"); + return Collections.singletonMap("my_script", ctx -> { ctx.put("z", 0); return null; }); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java index 1004a41bcc592..72bc337e9c9f7 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/ScriptProcessorTests.java @@ -19,22 +19,22 @@ package org.elasticsearch.ingest.common; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.ingest.IngestDocument; import org.elasticsearch.ingest.RandomDocumentPicks; -import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.core.Is.is; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class ScriptProcessorTests extends ESTestCase { @@ -42,24 +42,28 @@ public void testScripting() throws Exception { int randomBytesIn = randomInt(); int randomBytesOut = randomInt(); int randomBytesTotal = randomBytesIn + randomBytesOut; - - ScriptService scriptService = mock(ScriptService.class); - Script script = mockScript("_script"); - ExecutableScript.Factory factory = mock(ExecutableScript.Factory.class); - ExecutableScript executableScript = mock(ExecutableScript.class); - when(scriptService.compile(script, ExecutableScript.INGEST_CONTEXT)).thenReturn(factory); - when(factory.newInstance(any())).thenReturn(executableScript); + String scriptName = "script"; + ScriptService scriptService = new ScriptService(Settings.builder().build(), + Collections.singletonMap( + Script.DEFAULT_SCRIPT_LANG, new MockScriptEngine( + Script.DEFAULT_SCRIPT_LANG, + Collections.singletonMap( + scriptName, ctx -> { + ctx.put("bytes_total", randomBytesTotal); + return null; + } + ) + ) + ), + new HashMap<>(ScriptModule.CORE_CONTEXTS) + ); + Script script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()); Map document = new 
HashMap<>(); document.put("bytes_in", randomInt()); document.put("bytes_out", randomInt()); IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), document); - doAnswer(invocationOnMock -> { - ingestDocument.setFieldValue("bytes_total", randomBytesTotal); - return null; - }).when(executableScript).run(); - ScriptProcessor processor = new ScriptProcessor(randomAlphaOfLength(10), script, scriptService); processor.execute(ingestDocument); diff --git a/server/src/main/java/org/elasticsearch/script/ExecutableScript.java b/server/src/main/java/org/elasticsearch/script/ExecutableScript.java index e87b7cdf3890a..2f7a01c37980d 100644 --- a/server/src/main/java/org/elasticsearch/script/ExecutableScript.java +++ b/server/src/main/java/org/elasticsearch/script/ExecutableScript.java @@ -50,5 +50,4 @@ interface Factory { // TODO: remove these once each has its own script interface ScriptContext AGGS_CONTEXT = new ScriptContext<>("aggs_executable", Factory.class); ScriptContext UPDATE_CONTEXT = new ScriptContext<>("update", Factory.class); - ScriptContext INGEST_CONTEXT = new ScriptContext<>("ingest", Factory.class); } diff --git a/server/src/main/java/org/elasticsearch/script/IngestScript.java b/server/src/main/java/org/elasticsearch/script/IngestScript.java new file mode 100644 index 0000000000000..f357394ed31f0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/script/IngestScript.java @@ -0,0 +1,52 @@ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + +/** + * A script used by the Ingest Script Processor. + */ +public abstract class IngestScript { + + public static final String[] PARAMETERS = { "ctx" }; + + /** The context used to compile {@link IngestScript} factories. */ + public static final ScriptContext CONTEXT = new ScriptContext<>("ingest", Factory.class); + + /** The generic runtime parameters for the script. */ + private final Map params; + + public IngestScript(Map params) { + this.params = params; + } + + /** Return the parameters for this script. 
*/ + public Map getParams() { + return params; + } + + public abstract void execute(Map ctx); + + public interface Factory { + IngestScript newInstance(Map params); + } +} diff --git a/server/src/main/java/org/elasticsearch/script/ScriptModule.java b/server/src/main/java/org/elasticsearch/script/ScriptModule.java index f0e075eac7d93..695b19d88b2ff 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -48,7 +48,7 @@ public class ScriptModule { ExecutableScript.CONTEXT, ExecutableScript.AGGS_CONTEXT, ExecutableScript.UPDATE_CONTEXT, - ExecutableScript.INGEST_CONTEXT, + IngestScript.CONTEXT, FilterScript.CONTEXT, SimilarityScript.CONTEXT, SimilarityWeightScript.CONTEXT, diff --git a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index b35fcbcc03c17..585f860165160 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -168,7 +168,7 @@ public void testAllowAllScriptContextSettings() throws IOException { assertCompileAccepted("painless", "script", ScriptType.INLINE, SearchScript.CONTEXT); assertCompileAccepted("painless", "script", ScriptType.INLINE, SearchScript.AGGS_CONTEXT); assertCompileAccepted("painless", "script", ScriptType.INLINE, ExecutableScript.UPDATE_CONTEXT); - assertCompileAccepted("painless", "script", ScriptType.INLINE, ExecutableScript.INGEST_CONTEXT); + assertCompileAccepted("painless", "script", ScriptType.INLINE, IngestScript.CONTEXT); } public void testAllowSomeScriptTypeSettings() throws IOException { @@ -209,13 +209,13 @@ public void testAllowNoScriptContextSettings() throws IOException { } public void testCompileNonRegisteredContext() throws IOException { - contexts.remove(ExecutableScript.INGEST_CONTEXT.name); + contexts.remove(IngestScript.CONTEXT.name); buildScriptService(Settings.EMPTY); String type = scriptEngine.getType(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - scriptService.compile(new Script(ScriptType.INLINE, type, "test", Collections.emptyMap()), ExecutableScript.INGEST_CONTEXT)); - assertThat(e.getMessage(), containsString("script context [" + ExecutableScript.INGEST_CONTEXT.name + "] not supported")); + scriptService.compile(new Script(ScriptType.INLINE, type, "test", Collections.emptyMap()), IngestScript.CONTEXT)); + assertThat(e.getMessage(), containsString("script context [" + IngestScript.CONTEXT.name + "] not supported")); } public void testCompileCountedInCompilationStats() throws IOException { diff --git a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java index e608bd13d2559..8e40e4bcf1468 100644 --- a/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java @@ -88,6 +88,14 @@ public T compile(String name, String source, ScriptContext context, Map new IngestScript(parameters) { + @Override + public void execute(Map ctx) { + script.apply(ctx); + } + }; + return context.factoryClazz.cast(factory); } else if (context.instanceClazz.equals(TemplateScript.class)) { TemplateScript.Factory factory = vars -> { // TODO: need a better way to implement all these new contexts From d4d2df7907c6c776c8c98c1c170e650764744b16 Mon Sep 
17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 16 Jul 2018 10:53:51 +0200 Subject: [PATCH 05/13] [Test] Mute MlJobIT#testDeleteJobAfterMissingAliases Relates #32034 --- .../java/org/elasticsearch/xpack/ml/integration/MlJobIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java index 4c0019fb3193a..ee838ec5bc565 100644 --- a/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java +++ b/x-pack/qa/ml-native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/MlJobIT.java @@ -438,6 +438,7 @@ public void testDeleteJobAfterMissingIndex() throws Exception { client().performRequest("get", MachineLearning.BASE_PATH + "anomaly_detectors/" + jobId + "/_stats")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32034") public void testDeleteJobAfterMissingAliases() throws Exception { String jobId = "delete-job-after-missing-alias-job"; String readAliasName = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); From 69e61c794f1fe2d8a4c15a429922086c55e00815 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Mon, 16 Jul 2018 11:22:42 +0200 Subject: [PATCH 06/13] [Tests] Fix failure due to changes exception message (#32036) Java 11 seems to get more verbose on the ClassCastException we check for in SearchDocumentationIT. This changes the test from asserting the exact exception message to only checking the two classes involved are part of the message. Closes #32029 --- .../client/documentation/SearchDocumentationIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 6c78c88eeec66..780f315876996 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -337,8 +337,9 @@ public void testSearchRequestAggregations() throws IOException { Range range = aggregations.get("by_company"); // <1> // end::search-request-aggregations-get-wrongCast } catch (ClassCastException ex) { - assertEquals("org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms" - + " cannot be cast to org.elasticsearch.search.aggregations.bucket.range.Range", ex.getMessage()); + String message = ex.getMessage(); + assertThat(message, containsString("org.elasticsearch.search.aggregations.bucket.terms.ParsedStringTerms")); + assertThat(message, containsString("org.elasticsearch.search.aggregations.bucket.range.Range")); } assertEquals(3, elasticBucket.getDocCount()); assertEquals(30, avg, 0.0); From ac60fcac8cba18c17bd5be4b27356a46a14519c9 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 16 Jul 2018 15:25:45 +0200 Subject: [PATCH 07/13] [Rollup] Fix duplicate field names in test (#32075) This commit ensures that random field names do not clash with the explicit field names set by the tests. 
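The fix itself simply raises the minimum random length (from 1 to 5 characters) so that generated names can no longer equal the shorter field names the tests register explicitly. As a hedged illustration of the more general pattern — none of the names below appear in the patch — a test helper could instead retry until the candidate misses a reserved set:

[source,java]
--------------------------------------------------
import java.util.Random;
import java.util.Set;

// Illustrative sketch only: draw a random lowercase field name that is
// guaranteed not to collide with names the test has already registered.
public final class RandomFieldNames {

    private static final String ALPHABET = "abcdefghijklmnopqrstuvwxyz";

    static String randomFieldName(Random random, Set<String> reserved, int minLen, int maxLen) {
        while (true) {
            int len = minLen + random.nextInt(maxLen - minLen + 1);
            StringBuilder name = new StringBuilder(len);
            for (int i = 0; i < len; i++) {
                name.append(ALPHABET.charAt(random.nextInt(ALPHABET.length())));
            }
            String candidate = name.toString();
            if (reserved.contains(candidate) == false) {   // retry on a clash
                return candidate;
            }
        }
    }
}
--------------------------------------------------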
Closes #32067 --- .../elasticsearch/xpack/core/rollup/ConfigTestHelpers.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index 3d82ac118f503..3e4e4a84d2f8e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -94,7 +94,7 @@ public static DateHistoGroupConfig.Builder getDateHisto() { if (ESTestCase.randomBoolean()) { dateHistoBuilder.setDelay(new DateHistogramInterval(randomPositiveTimeValue())); } - dateHistoBuilder.setField(ESTestCase.randomAlphaOfLengthBetween(1, 10 )); + dateHistoBuilder.setField(ESTestCase.randomAlphaOfLengthBetween(5, 10)); return dateHistoBuilder; } @@ -112,8 +112,8 @@ public static TermsGroupConfig.Builder getTerms() { } public static List getFields() { - return IntStream.range(0, ESTestCase.randomIntBetween(1,10)) - .mapToObj(n -> ESTestCase.randomAlphaOfLengthBetween(1,10)) + return IntStream.range(0, ESTestCase.randomIntBetween(1, 10)) + .mapToObj(n -> ESTestCase.randomAlphaOfLengthBetween(5, 10)) .collect(Collectors.toList()); } From 69af822a557e2f3ccf120e06b675412e748b5142 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Mon, 16 Jul 2018 10:47:46 -0400 Subject: [PATCH 08/13] [Rollup] Replace RollupIT with a ESRestTestCase version (#31977) The old RollupIT was a node IT, an flaky for a number of reasons. This new version is an ESRestTestCase and should be a little more robust. This was added to the multi-node QA tests as that seemed like the most appropriate location. It didn't seem necessary to create a whole new QA module. Note: The only test that was ported was the "Big" test for validating a larger dataset. The rest of the tests are represented in existing yaml tests. 
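Because the ported test now talks to the cluster purely over HTTP, it follows the usual `ESRestTestCase` shape. A minimal sketch of that shape is shown below; the class name and the single assertion are invented for illustration, while `Request`, `Response` and `client().performRequest(...)` are the same low-level REST client calls the new test uses.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.test.rest.ESRestTestCase;

// Minimal REST-based integration test: everything goes through the
// low-level client, so it can target an externally started multi-node cluster.
public class RollupSmokeIT extends ESRestTestCase {

    public void testRollupJobEndpointIsReachable() throws IOException {
        Request request = new Request("GET", "/_xpack/rollup/job/_all");
        Response response = client().performRequest(request);
        assertEquals(200, response.getStatusLine().getStatusCode());
    }
}
--------------------------------------------------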
Closes #31258 Closes #30232 Related to #30290 --- x-pack/plugin/rollup/build.gradle | 28 - .../elasticsearch/xpack/rollup/RollupIT.java | 497 ------------------ .../elasticsearch/multi_node/RollupIT.java | 326 ++++++++++++ 3 files changed, 326 insertions(+), 525 deletions(-) delete mode 100644 x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java create mode 100644 x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java diff --git a/x-pack/plugin/rollup/build.gradle b/x-pack/plugin/rollup/build.gradle index 18ef7abee5c64..ff9c30ed9a934 100644 --- a/x-pack/plugin/rollup/build.gradle +++ b/x-pack/plugin/rollup/build.gradle @@ -1,6 +1,3 @@ -import com.carrotsearch.gradle.junit4.RandomizedTestingTask -import org.elasticsearch.gradle.BuildPlugin - evaluationDependsOn(xpackModule('core')) apply plugin: 'elasticsearch.esplugin' @@ -23,33 +20,8 @@ dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } -dependencyLicenses { - ignoreSha 'x-pack-core' -} - run { plugin xpackModule('core') } integTest.enabled = false - - -// Instead we create a separate task to run the -// tests based on ESIntegTestCase -task internalClusterTest(type: RandomizedTestingTask, - group: JavaBasePlugin.VERIFICATION_GROUP, - description: 'Multi-node tests', - dependsOn: test.dependsOn) { - configure(BuildPlugin.commonTestConfig(project)) - classpath = project.test.classpath - testClassesDirs = project.test.testClassesDirs - include '**/*IT.class' - systemProperty 'es.set.netty.runtime.available.processors', 'false' -} -check.dependsOn internalClusterTest -internalClusterTest.mustRunAfter test - -// also add an "alias" task to make typing on the command line easier task icTest { -task icTest { - dependsOn internalClusterTest -} diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java deleted file mode 100644 index 3f930cb42981d..0000000000000 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupIT.java +++ /dev/null @@ -1,497 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.rollup; - -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.MatchAllQueryBuilder; -import org.elasticsearch.license.LicenseService; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.Aggregation; -import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.XPackSettings; -import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction; -import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; -import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction; -import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; -import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; -import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; -import org.elasticsearch.xpack.core.rollup.job.DateHistoGroupConfig; -import org.elasticsearch.xpack.core.rollup.job.GroupConfig; -import org.elasticsearch.xpack.core.rollup.job.IndexerState; -import org.elasticsearch.xpack.core.rollup.job.MetricConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus; -import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; - -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; -import static org.hamcrest.core.IsEqual.equalTo; - -@ThreadLeakScope(ThreadLeakScope.Scope.NONE) -public class RollupIT extends ESIntegTestCase { - - private String taskId = "test-bigID"; - - @Override - protected boolean ignoreExternalCluster() { - return true; - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(LocalStateRollup.class, CommonAnalysisPlugin.class, Netty4Plugin.class); - } - - @Override - protected Collection> transportClientPlugins() { - return nodePlugins(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - Settings.Builder builder = Settings.builder(); - builder.put(XPackSettings.ROLLUP_ENABLED.getKey(), true); - builder.put(XPackSettings.SECURITY_ENABLED.getKey(), false); - builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial"); - return 
builder.build(); - } - - @Override - protected Settings externalClusterClientSettings() { - return nodeSettings(0); - } - - @Override - protected Settings transportClientSettings() { - return Settings.builder().put(super.transportClientSettings()) - .put(XPackSettings.ROLLUP_ENABLED.getKey(), true) - .put(XPackSettings.SECURITY_ENABLED.getKey(), false) - .build(); - } - - @Before - public void createIndex() { - client().admin().indices().prepareCreate("test-1").addMapping("doc", "{\"doc\": {\"properties\": {" + - "\"date_histo\": {\"type\": \"date\"}, " + - "\"histo\": {\"type\": \"integer\"}, " + - "\"terms\": {\"type\": \"keyword\"}}}}", XContentType.JSON).get(); - client().admin().cluster().prepareHealth("test-1").setWaitForYellowStatus().get(); - - BulkRequestBuilder bulk = client().prepareBulk(); - Map source = new HashMap<>(3); - for (int i = 0; i < 20; i++) { - for (int j = 0; j < 20; j++) { - for (int k = 0; k < 20; k++) { - source.put("date_histo", new DateTime().minusDays(i).toString()); - source.put("histo", Integer.toString(j * 100)); - source.put("terms", Integer.toString(k * 100)); - source.put("foo", k); - bulk.add(new IndexRequest("test-1", "doc").source(source)); - source.clear(); - } - } - } - bulk.get(); - client().admin().indices().prepareRefresh("test-1").get(); - } - - public void testGetJob() throws ExecutionException, InterruptedException { - MetricConfig metricConfig = new MetricConfig.Builder() - .setField("foo") - .setMetrics(Arrays.asList("sum", "min", "max", "avg")) - .build(); - - DateHistoGroupConfig.Builder datehistoGroupConfig = new DateHistoGroupConfig.Builder(); - datehistoGroupConfig.setField("date_histo"); - datehistoGroupConfig.setInterval(new DateHistogramInterval("1d")); - - GroupConfig.Builder groupConfig = new GroupConfig.Builder(); - groupConfig.setDateHisto(datehistoGroupConfig.build()); - - - RollupJobConfig.Builder config = new RollupJobConfig.Builder(); - config.setIndexPattern("test-1"); - config.setRollupIndex("rolled"); - config.setId("testGet"); - config.setGroupConfig(groupConfig.build()); - config.setMetricsConfig(Collections.singletonList(metricConfig)); - config.setCron("* * * * * ? 
*"); - config.setPageSize(10); - - PutRollupJobAction.Request request = new PutRollupJobAction.Request(); - request.setConfig(config.build()); - client().execute(PutRollupJobAction.INSTANCE, request).get(); - - GetRollupJobsAction.Request getRequest = new GetRollupJobsAction.Request("testGet"); - GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, getRequest).get(); - assertThat(response.getJobs().size(), equalTo(1)); - assertThat(response.getJobs().get(0).getJob().getId(), equalTo("testGet")); - } - - public void testIndexPattern() throws Exception { - MetricConfig metricConfig = new MetricConfig.Builder() - .setField("foo") - .setMetrics(Arrays.asList("sum", "min", "max", "avg")) - .build(); - - DateHistoGroupConfig.Builder datehistoGroupConfig = new DateHistoGroupConfig.Builder(); - datehistoGroupConfig.setField("date_histo"); - datehistoGroupConfig.setInterval(new DateHistogramInterval("1d")); - - GroupConfig.Builder groupConfig = new GroupConfig.Builder(); - groupConfig.setDateHisto(datehistoGroupConfig.build()); - - - RollupJobConfig.Builder config = new RollupJobConfig.Builder(); - config.setIndexPattern("test-*"); - config.setId("testIndexPattern"); - config.setRollupIndex("rolled"); - config.setGroupConfig(groupConfig.build()); - config.setMetricsConfig(Collections.singletonList(metricConfig)); - config.setCron("* * * * * ? *"); - config.setPageSize(10); - - PutRollupJobAction.Request request = new PutRollupJobAction.Request(); - request.setConfig(config.build()); - client().execute(PutRollupJobAction.INSTANCE, request).get(); - - StartRollupJobAction.Request startRequest = new StartRollupJobAction.Request("testIndexPattern"); - StartRollupJobAction.Response startResponse = client().execute(StartRollupJobAction.INSTANCE, startRequest).get(); - Assert.assertThat(startResponse.isStarted(), equalTo(true)); - - // Make sure it started - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); - if (rollupJobStatus == null) { - fail("null"); - } - - IndexerState state = rollupJobStatus.getIndexerState(); - assertTrue(state.equals(IndexerState.STARTED) || state.equals(IndexerState.INDEXING)); - }, 60, TimeUnit.SECONDS); - - // And wait for it to finish - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("testIndexPattern"); - if (rollupJobStatus == null) { - fail("null"); - } - - IndexerState state = rollupJobStatus.getIndexerState(); - assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); - }, 60, TimeUnit.SECONDS); - - GetRollupJobsAction.Request getRequest = new GetRollupJobsAction.Request("testIndexPattern"); - GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, getRequest).get(); - Assert.assertThat(response.getJobs().size(), equalTo(1)); - Assert.assertThat(response.getJobs().get(0).getJob().getId(), equalTo("testIndexPattern")); - - GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("rolled").get(); - Assert.assertThat(getIndexResponse.indices().length, Matchers.greaterThan(0)); - } - - public void testTwoJobsStartStopDeleteOne() throws Exception { - MetricConfig metricConfig = new MetricConfig.Builder() - .setField("foo") - .setMetrics(Arrays.asList("sum", "min", "max", "avg")) - .build(); - - DateHistoGroupConfig.Builder datehistoGroupConfig = new DateHistoGroupConfig.Builder(); - datehistoGroupConfig.setField("date_histo"); - 
datehistoGroupConfig.setInterval(new DateHistogramInterval("1d")); - - GroupConfig.Builder groupConfig = new GroupConfig.Builder(); - groupConfig.setDateHisto(datehistoGroupConfig.build()); - - - RollupJobConfig.Builder config = new RollupJobConfig.Builder(); - config.setIndexPattern("test-1"); - config.setRollupIndex("rolled"); - config.setId("job1"); - config.setGroupConfig(groupConfig.build()); - config.setMetricsConfig(Collections.singletonList(metricConfig)); - config.setCron("* * * * * ? *"); - config.setPageSize(10); - - PutRollupJobAction.Request request = new PutRollupJobAction.Request(); - request.setConfig(config.build()); - client().execute(PutRollupJobAction.INSTANCE, request).get(); - - RollupJobConfig.Builder config2 = new RollupJobConfig.Builder(); - config2.setIndexPattern("test-1"); - config2.setRollupIndex("rolled"); - config2.setId("job2"); - config2.setGroupConfig(groupConfig.build()); - config2.setMetricsConfig(Collections.singletonList(metricConfig)); - config2.setCron("* * * * * ? *"); - config2.setPageSize(10); - - PutRollupJobAction.Request request2 = new PutRollupJobAction.Request(); - request2.setConfig(config2.build()); - client().execute(PutRollupJobAction.INSTANCE, request2).get(); - - StartRollupJobAction.Request startRequest = new StartRollupJobAction.Request("job1"); - StartRollupJobAction.Response response = client().execute(StartRollupJobAction.INSTANCE, startRequest).get(); - Assert.assertThat(response.isStarted(), equalTo(true)); - - // Make sure it started - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); - if (rollupJobStatus == null) { - fail("null"); - } - - IndexerState state = rollupJobStatus.getIndexerState(); - assertTrue(state.equals(IndexerState.STARTED) || state.equals(IndexerState.INDEXING)); - }, 60, TimeUnit.SECONDS); - - //but not the other task - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - - IndexerState state = rollupJobStatus.getIndexerState(); - assertTrue(state.equals(IndexerState.STOPPED)); - }, 60, TimeUnit.SECONDS); - - // Delete the task - DeleteRollupJobAction.Request deleteRequest = new DeleteRollupJobAction.Request("job1"); - DeleteRollupJobAction.Response deleteResponse = client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); - Assert.assertTrue(deleteResponse.isAcknowledged()); - - // Make sure the first job's task is gone - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("job1"); - assertTrue(rollupJobStatus == null); - }, 60, TimeUnit.SECONDS); - - // And that we don't see it in the GetJobs API - GetRollupJobsAction.Request getRequest = new GetRollupJobsAction.Request("job1"); - GetRollupJobsAction.Response getResponse = client().execute(GetRollupJobsAction.INSTANCE, getRequest).get(); - Assert.assertThat(getResponse.getJobs().size(), equalTo(0)); - - // But make sure the other job is still there - getRequest = new GetRollupJobsAction.Request("job2"); - getResponse = client().execute(GetRollupJobsAction.INSTANCE, getRequest).get(); - Assert.assertThat(getResponse.getJobs().size(), equalTo(1)); - Assert.assertThat(getResponse.getJobs().get(0).getJob().getId(), equalTo("job2")); - - // and still STOPPED - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus("job2"); - - IndexerState state = rollupJobStatus.getIndexerState(); - assertTrue(state.equals(IndexerState.STOPPED)); - }, 60, TimeUnit.SECONDS); - } - - public void testBig() throws 
Exception { - - client().admin().indices().prepareCreate("test-big") - .addMapping("test-big", "{\"test-big\": {\"properties\": {\"timestamp\": {\"type\": \"date\"}, " + - "\"thefield\": {\"type\": \"integer\"}}}}", XContentType.JSON) - .setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get(); - client().admin().cluster().prepareHealth("test-big").setWaitForYellowStatus().get(); - - client().admin().indices().prepareCreate("test-verify") - .addMapping("test-big", "{\"test-big\": {\"properties\": {\"timestamp\": {\"type\": \"date\"}, " + - "\"thefield\": {\"type\": \"integer\"}}}}", XContentType.JSON) - .setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get(); - client().admin().cluster().prepareHealth("test-verify").setWaitForYellowStatus().get(); - - BulkRequestBuilder bulk = client().prepareBulk(); - Map source = new HashMap<>(3); - - int numDays = 90; - int numDocsPerDay = 100; - - for (int i = 0; i < numDays; i++) { - DateTime ts = new DateTime().minusDays(i); - for (int j = 0; j < numDocsPerDay; j++) { - - int value = ESTestCase.randomIntBetween(0,100); - source.put("timestamp", ts.toString()); - source.put("thefield", value); - bulk.add(new IndexRequest("test-big", "test-big").source(source)); - bulk.add(new IndexRequest("test-verify", "test-big").source(source)); - source.clear(); - } - - bulk.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); - bulk.get(); - bulk = client().prepareBulk(); - logger.info("Day: [" + i + "]: " + ts.toString() + " [" + ts.getMillis() + "]" ); - } - - - client().admin().indices().prepareRefresh("test-big").get(); - client().admin().indices().prepareRefresh("test-verify").get(); - - MetricConfig metricConfig = new MetricConfig.Builder() - .setField("thefield") - .setMetrics(Arrays.asList("sum", "min", "max", "avg")) - .build(); - - DateHistoGroupConfig.Builder datehistoGroupConfig = new DateHistoGroupConfig.Builder(); - datehistoGroupConfig.setField("timestamp"); - datehistoGroupConfig.setInterval(new DateHistogramInterval("1d")); - - GroupConfig.Builder groupConfig = new GroupConfig.Builder(); - groupConfig.setDateHisto(datehistoGroupConfig.build()); - - RollupJobConfig.Builder config = new RollupJobConfig.Builder(); - config.setIndexPattern("test-big"); - config.setRollupIndex("rolled"); - config.setId(taskId); - config.setGroupConfig(groupConfig.build()); - config.setMetricsConfig(Collections.singletonList(metricConfig)); - config.setCron("* * * * * ? 
*"); - config.setPageSize(1000); - - PutRollupJobAction.Request request = new PutRollupJobAction.Request(); - request.setConfig(config.build()); - client().execute(PutRollupJobAction.INSTANCE, request).get(); - - StartRollupJobAction.Request startRequest = new StartRollupJobAction.Request(taskId); - StartRollupJobAction.Response response = client().execute(StartRollupJobAction.INSTANCE, startRequest).get(); - Assert.assertThat(response.isStarted(), equalTo(true)); - - ESTestCase.assertBusy(() -> { - RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); - if (rollupJobStatus == null) { - fail("null"); - } - - IndexerState state = rollupJobStatus.getIndexerState(); - logger.error("state: [" + state + "]"); - assertTrue(state.equals(IndexerState.STARTED) && rollupJobStatus.getPosition() != null); - }, 60, TimeUnit.SECONDS); - - RollupJobStatus rollupJobStatus = getRollupJobStatus(taskId); - if (rollupJobStatus == null) { - Assert.fail("rollup job status should not be null"); - } - - client().admin().indices().prepareRefresh("rolled").get(); - - SearchResponse count = client().prepareSearch("rolled").setSize(10).get(); - // total document is numDays minus 1 because we don't build rollup for - // buckets that are not full (bucket for the current day). - Assert.assertThat(count.getHits().totalHits, equalTo(Long.valueOf(numDays-1))); - - if (ESTestCase.randomBoolean()) { - client().admin().indices().prepareDelete("test-big").get(); - client().admin().indices().prepareRefresh().get(); - } - - // Execute the rollup search - SearchRequest rollupRequest = new SearchRequest("rolled") - .source(new SearchSourceBuilder() - .aggregation(dateHistogram("timestamp") - .interval(1000*86400) - .field("timestamp")) - .size(0)); - SearchResponse searchResponse = client().execute(RollupSearchAction.INSTANCE, rollupRequest).get(); - Assert.assertNotNull(searchResponse); - - // And a regular search against the verification index - SearchRequest verifyRequest = new SearchRequest("test-verify") - .source(new SearchSourceBuilder() - .aggregation(dateHistogram("timestamp") - .interval(1000*86400) - .field("timestamp")) - .size(0)); - SearchResponse verifyResponse = client().execute(SearchAction.INSTANCE, verifyRequest).get(); - - Map rollupAggs = searchResponse.getAggregations().asMap(); - - for (Aggregation agg : verifyResponse.getAggregations().asList()) { - Aggregation rollupAgg = rollupAggs.get(agg.getName()); - - Assert.assertNotNull(rollupAgg); - Assert.assertThat(rollupAgg.getType(), equalTo(agg.getType())); - verifyAgg((InternalDateHistogram)agg, (InternalDateHistogram)rollupAgg); - } - - // And a quick sanity check for doc type - SearchRequest rollupRawRequest = new SearchRequest("rolled") - .source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()) - .size(1)); - SearchResponse searchRawResponse = client().execute(SearchAction.INSTANCE, rollupRawRequest).get(); - Assert.assertNotNull(searchRawResponse); - assertThat(searchRawResponse.getHits().getAt(0).getType(), equalTo("_doc")); - } - - private void verifyAgg(InternalDateHistogram verify, InternalDateHistogram rollup) { - for (int i = 0; i < rollup.getBuckets().size(); i++) { - InternalDateHistogram.Bucket verifyBucket = verify.getBuckets().get(i); - InternalDateHistogram.Bucket rollupBucket = rollup.getBuckets().get(i); - Assert.assertThat(rollupBucket.getDocCount(), equalTo(verifyBucket.getDocCount())); - Assert.assertThat(((DateTime)rollupBucket.getKey()).getMillis(), equalTo(((DateTime)verifyBucket.getKey()).getMillis())); - 
Assert.assertTrue(rollupBucket.getAggregations().equals(verifyBucket.getAggregations())); - } - } - - private RollupJobStatus getRollupJobStatus(final String taskId) { - final GetRollupJobsAction.Request request = new GetRollupJobsAction.Request(taskId); - final GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, request).actionGet(); - - if (response.getJobs() != null && response.getJobs().isEmpty() == false) { - assertThat("Expect 1 rollup job with id " + taskId, response.getJobs().size(), equalTo(1)); - return response.getJobs().iterator().next().getStatus(); - } - return null; - } - - @After - public void cleanup() throws ExecutionException, InterruptedException { - GetRollupJobsAction.Request getRequest = new GetRollupJobsAction.Request("_all"); - GetRollupJobsAction.Response response = client().execute(GetRollupJobsAction.INSTANCE, getRequest).get(); - - for (GetRollupJobsAction.JobWrapper job : response.getJobs()) { - StopRollupJobAction.Request stopRequest = new StopRollupJobAction.Request(job.getJob().getId()); - try { - client().execute(StopRollupJobAction.INSTANCE, stopRequest).get(); - } catch (ElasticsearchException e) { - // - } - - DeleteRollupJobAction.Request deleteRequest = new DeleteRollupJobAction.Request(job.getJob().getId()); - client().execute(DeleteRollupJobAction.INSTANCE, deleteRequest).get(); - } - } -} diff --git a/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java new file mode 100644 index 0000000000000..b0142ae141853 --- /dev/null +++ b/x-pack/qa/multi-node/src/test/java/org/elasticsearch/multi_node/RollupIT.java @@ -0,0 +1,326 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.multi_node; + +import org.apache.http.HttpStatus; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.core.rollup.job.RollupJob; +import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; +import org.junit.After; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.isOneOf; + +public class RollupIT extends ESRestTestCase { + + @Override + protected Settings restClientSettings() { + return getClientSettings("super-user", "x-pack-super-password"); + } + + @Override + protected Settings restAdminSettings() { + return getClientSettings("super-user", "x-pack-super-password"); + } + + private Settings getClientSettings(final String username, final String password) { + final String token = basicAuthHeaderValue(username, new SecureString(password.toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + static Map toMap(Response response) throws IOException { + return toMap(EntityUtils.toString(response.getEntity())); + } + + static Map toMap(String response) throws IOException { + return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false); + } + + @After + public void clearRollupMetadata() throws Exception { + deleteAllJobs(); + waitForPendingTasks(); + // indices will be deleted by the ESRestTestCase class + } + + public void testBigRollup() throws Exception { + final int numDocs = 200; + + // index documents for the rollup job + final StringBuilder bulk = new StringBuilder(); + for (int i = 0; i < numDocs; i++) { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"_doc\"}}\n"); + ZonedDateTime zdt = ZonedDateTime.ofInstant(Instant.ofEpochSecond(1531221196 + (60*i)), ZoneId.of("UTC")); + String date = zdt.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); + bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n"); + } + bulk.append("\r\n"); + + final Request bulkRequest = new Request("POST", "/_bulk"); + bulkRequest.addParameter("refresh", "true"); + bulkRequest.setJsonEntity(bulk.toString()); + client().performRequest(bulkRequest); + // create the rollup job + final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-job-test"); + createRollupJobRequest.setJsonEntity("{" + + "\"index_pattern\":\"rollup-*\"," + + "\"rollup_index\":\"results-rollup\"," + + "\"cron\":\"*/1 * * * * 
?\"," // fast cron and big page size so test runs quickly + + "\"page_size\":20," + + "\"groups\":{" + + " \"date_histogram\":{" + + " \"field\":\"timestamp\"," + + " \"interval\":\"5m\"" + + " }" + + "}," + + "\"metrics\":[" + + " {\"field\":\"value\",\"metrics\":[\"min\",\"max\",\"sum\"]}" + + "]" + + "}"); + + Map createRollupJobResponse = toMap(client().performRequest(createRollupJobRequest)); + assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + + // start the rollup job + final Request startRollupJobRequest = new Request("POST", "_xpack/rollup/job/rollup-job-test/_start"); + Map startRollupJobResponse = toMap(client().performRequest(startRollupJobRequest)); + assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); + + assertRollUpJob("rollup-job-test"); + + // Wait for the job to finish, by watching how many rollup docs we've indexed + assertBusy(() -> { + final Request getRollupJobRequest = new Request("GET", "_xpack/rollup/job/rollup-job-test"); + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); + assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + Map job = getJob(getRollupJobResponse, "rollup-job-test"); + if (job != null) { + assertThat(ObjectPath.eval("status.job_state", job), equalTo("started")); + assertThat(ObjectPath.eval("stats.rollups_indexed", job), equalTo(41)); + } + }, 30L, TimeUnit.SECONDS); + + // Refresh the rollup index to make sure all newly indexed docs are searchable + final Request refreshRollupIndex = new Request("POST", "results-rollup/_refresh"); + toMap(client().performRequest(refreshRollupIndex)); + + String jsonRequestBody = "{\n" + + " \"size\": 0,\n" + + " \"query\": {\n" + + " \"match_all\": {}\n" + + " },\n" + + " \"aggs\": {\n" + + " \"date_histo\": {\n" + + " \"date_histogram\": {\n" + + " \"field\": \"timestamp\",\n" + + " \"interval\": \"1h\"\n" + + " },\n" + + " \"aggs\": {\n" + + " \"the_max\": {\n" + + " \"max\": {\n" + + " \"field\": \"value\"\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + " }\n" + + "}"; + + Request request = new Request("GET", "rollup-docs/_search"); + request.setJsonEntity(jsonRequestBody); + Response liveResponse = client().performRequest(request); + Map liveBody = toMap(liveResponse); + + request = new Request("GET", "results-rollup/_rollup_search"); + request.setJsonEntity(jsonRequestBody); + Response rollupResponse = client().performRequest(request); + Map rollupBody = toMap(rollupResponse); + + // Do the live agg results match the rollup agg results? + assertThat(ObjectPath.eval("aggregations.date_histo.buckets", liveBody), + equalTo(ObjectPath.eval("aggregations.date_histo.buckets", rollupBody))); + + request = new Request("GET", "rollup-docs/_rollup_search"); + request.setJsonEntity(jsonRequestBody); + Response liveRollupResponse = client().performRequest(request); + Map liveRollupBody = toMap(liveRollupResponse); + + // Does searching the live index via rollup_search work match the live search? 
+ assertThat(ObjectPath.eval("aggregations.date_histo.buckets", liveBody), + equalTo(ObjectPath.eval("aggregations.date_histo.buckets", liveRollupBody))); + + } + + @SuppressWarnings("unchecked") + private void assertRollUpJob(final String rollupJob) throws Exception { + String[] states = new String[]{"indexing", "started"}; + waitForRollUpJob(rollupJob, states); + + // check that the rollup job is started using the RollUp API + final Request getRollupJobRequest = new Request("GET", "_xpack/rollup/job/" + rollupJob); + Map getRollupJobResponse = toMap(client().performRequest(getRollupJobRequest)); + Map job = getJob(getRollupJobResponse, rollupJob); + if (job != null) { + assertThat(ObjectPath.eval("status.job_state", job), isOneOf(states)); + } + + // check that the rollup job is started using the Tasks API + final Request taskRequest = new Request("GET", "_tasks"); + taskRequest.addParameter("detailed", "true"); + taskRequest.addParameter("actions", "xpack/rollup/*"); + Map taskResponse = toMap(client().performRequest(taskRequest)); + Map taskResponseNodes = (Map) taskResponse.get("nodes"); + Map taskResponseNode = (Map) taskResponseNodes.values().iterator().next(); + Map taskResponseTasks = (Map) taskResponseNode.get("tasks"); + Map taskResponseStatus = (Map) taskResponseTasks.values().iterator().next(); + assertThat(ObjectPath.eval("status.job_state", taskResponseStatus), isOneOf(states)); + + // check that the rollup job is started using the Cluster State API + final Request clusterStateRequest = new Request("GET", "_cluster/state/metadata"); + Map clusterStateResponse = toMap(client().performRequest(clusterStateRequest)); + List> rollupJobTasks = ObjectPath.eval("metadata.persistent_tasks.tasks", clusterStateResponse); + + boolean hasRollupTask = false; + for (Map task : rollupJobTasks) { + if (ObjectPath.eval("id", task).equals(rollupJob)) { + hasRollupTask = true; + + final String jobStateField = "task.xpack/rollup/job.state.job_state"; + assertThat("Expected field [" + jobStateField + "] to be started or indexing in " + task.get("id"), + ObjectPath.eval(jobStateField, task), isOneOf(states)); + break; + } + } + if (hasRollupTask == false) { + fail("Expected persistent task for [" + rollupJob + "] but none found."); + } + + } + + private void waitForRollUpJob(final String rollupJob,String[] expectedStates) throws Exception { + assertBusy(() -> { + final Request getRollupJobRequest = new Request("GET", "_xpack/rollup/job/" + rollupJob); + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); + assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + Map job = getJob(getRollupJobResponse, rollupJob); + if (job != null) { + assertThat(ObjectPath.eval("status.job_state", job), isOneOf(expectedStates)); + } + }, 30L, TimeUnit.SECONDS); + } + + private Map getJob(Response response, String targetJobId) throws IOException { + return getJob(ESRestTestCase.entityAsMap(response), targetJobId); + } + + @SuppressWarnings("unchecked") + private Map getJob(Map jobsMap, String targetJobId) throws IOException { + + List> jobs = + (List>) XContentMapValues.extractValue("jobs", jobsMap); + + if (jobs == null) { + return null; + } + + for (Map job : jobs) { + String jobId = (String) ((Map) job.get("config")).get("id"); + if (jobId.equals(targetJobId)) { + return job; + } + } + return null; + } + + private void waitForPendingTasks() throws Exception { + ESTestCase.assertBusy(() -> { + try { + Request request = new Request("GET", 
"/_cat/tasks"); + request.addParameter("detailed", "true"); + Response response = adminClient().performRequest(request); + if (response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) { + try (BufferedReader responseReader = new BufferedReader( + new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + int activeTasks = 0; + String line; + StringBuilder tasksListString = new StringBuilder(); + while ((line = responseReader.readLine()) != null) { + + // We only care about Rollup jobs, otherwise this fails too easily due to unrelated tasks + if (line.startsWith(RollupJob.NAME) == true) { + activeTasks++; + tasksListString.append(line); + tasksListString.append('\n'); + } + } + assertEquals(activeTasks + " active tasks found:\n" + tasksListString, 0, activeTasks); + } + } + } catch (IOException e) { + throw new AssertionError("Error getting active tasks list", e); + } + }); + } + + @SuppressWarnings("unchecked") + private void deleteAllJobs() throws Exception { + Request request = new Request("GET", "/_xpack/rollup/job/_all"); + Response response = adminClient().performRequest(request); + Map jobs = ESRestTestCase.entityAsMap(response); + @SuppressWarnings("unchecked") + List> jobConfigs = + (List>) XContentMapValues.extractValue("jobs", jobs); + + if (jobConfigs == null) { + return; + } + + for (Map jobConfig : jobConfigs) { + logger.debug(jobConfig); + String jobId = (String) ((Map) jobConfig.get("config")).get("id"); + logger.debug("Deleting job " + jobId); + try { + request = new Request("DELETE", "/_xpack/rollup/job/" + jobId); + adminClient().performRequest(request); + } catch (Exception e) { + // ok + } + } + } + + private static String responseEntityToString(Response response) throws Exception { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8))) { + return reader.lines().collect(Collectors.joining("\n")); + } + } +} From d88d76483c970592a141512383e1a6a9d80d0e1a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Fri, 13 Jul 2018 18:08:35 +0100 Subject: [PATCH 09/13] Adds a new auto-interval date histogram (#28993) * Adds a new auto-interval date histogram This change adds a new type of histogram aggregation called `auto_date_histogram` where you can specify the target number of buckets you require and it will find an appropriate interval for the returned buckets. The aggregation works by first collecting documents in buckets at second interval, when it has created more than the target number of buckets it merges these buckets into minute interval bucket and continues collecting until it reaches the target number of buckets again. It will keep merging buckets when it exceeds the target until either collection is finished or the highest interval (currently years) is reached. A similar process happens at reduce time. This aggregation intentionally does not support min_doc_count, offest and extended_bounds to keep the already complex logic from becoming more complex. The aggregation accepts sub-aggregations but will always operate in `breadth_first` mode deferring the computation of sub-aggregations until the final buckets from the shard are known. min_doc_count is effectively hard-coded to zero meaning that we will insert empty buckets where necessary. 
Closes #9572 * Adds documentation * Added sub aggregator test * Fixes failing docs test * Brings branch up to date with master changes * trying to get tests to pass again * Fixes multiBucketConsumer accounting * Collects more buckets than needed on shards This gives us more options at reduce time in terms of how we do the final merge of the buckeets to produce the final result * Revert "Collects more buckets than needed on shards" This reverts commit 993c782d117892af9a3c86a51921cdee630a3ac5. * Adds ability to merge within a rounding * Fixes nonn-timezone doc test failure * Fix time zone tests * iterates on tests * Adds test case and documentation changes Added some notes in the documentation about the intervals that can bbe returned. Also added a test case that utilises the merging of conseecutive buckets * Fixes performance bug The bug meant that getAppropriate rounding look a huge amount of time if the range of the data was large but also sparsely populated. In these situations the rounding would be very low so iterating through the rounding values from the min key to the max keey look a long time (~120 seconds in one test). The solution is to add a rough estimate first which chooses the rounding based just on the long values of the min and max keeys alone but selects the rounding one lower than the one it thinks is appropriate so the accurate method can choose the final rounding taking into account the fact that intervals are not always fixed length. Thee commit also adds more tests * Changes to only do complex reduction on final reduce * merge latest with master * correct tests and add a new test case for 10k buckets * refactor to perform bucket number check in innerBuild * correctly derive bucket setting, update tests to increase bucket threshold * fix checkstyle * address code review comments * add documentation for default buckets * fix typo --- .../client/RestHighLevelClient.java | 3 + docs/reference/aggregations/bucket.asciidoc | 2 + .../autodatehistogram-aggregation.asciidoc | 283 ++++ .../elasticsearch/search/SearchModule.java | 4 + .../bucket/BucketsAggregator.java | 13 + .../MergingBucketsDeferringCollector.java | 236 +++ .../AutoDateHistogramAggregationBuilder.java | 218 +++ .../AutoDateHistogramAggregator.java | 199 +++ .../AutoDateHistogramAggregatorFactory.java | 72 + .../histogram/DateHistogramAggregator.java | 4 +- .../histogram/InternalAutoDateHistogram.java | 601 ++++++++ .../histogram/InternalDateHistogram.java | 2 +- .../histogram/ParsedAutoDateHistogram.java | 91 ++ .../aggregations/AggregationsTests.java | 2 + .../bucket/AutoDateHistogramTests.java | 44 + .../AutoDateHistogramAggregatorTests.java | 1332 +++++++++++++++++ .../InternalAutoDateHistogramTests.java | 154 ++ .../aggregations/AggregatorTestCase.java | 3 +- .../test/InternalAggregationTestCase.java | 3 + ...nternalMultiBucketAggregationTestCase.java | 3 +- 20 files changed, 3263 insertions(+), 6 deletions(-) create mode 100644 docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java create mode 100644 
server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 736a49879188b..b9367303f7fa4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -85,8 +85,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; @@ -1347,6 +1349,7 @@ static List getDefaultNamedXContents() { map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); + map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index 3c8f3599981f9..e4b17ea293232 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -19,6 +19,8 @@ setting named `search.max_buckets`. 
It is disabled by default (-1) but requests include::bucket/adjacency-matrix-aggregation.asciidoc[] +include::bucket/autodatehistogram-aggregation.asciidoc[] + include::bucket/children-aggregation.asciidoc[] include::bucket/composite-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc new file mode 100644 index 0000000000000..28cb65ce6cc48 --- /dev/null +++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc @@ -0,0 +1,283 @@ +[[search-aggregations-bucket-autodatehistogram-aggregation]] +=== Auto-interval Date Histogram Aggregation + +A multi-bucket aggregation similar to the <> except +instead of providing an interval to use as the width of each bucket, a target number of buckets is provided +indicating the number of buckets needed and the interval of the buckets is automatically chosen to best achieve +that target. The number of buckets returned will always be less than or equal to this target number. + +The buckets field is optional, and will default to 10 buckets if not specified. + +Requesting a target of 10 buckets. + +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "auto_date_histogram" : { + "field" : "date", + "buckets" : 10 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +==== Keys + +Internally, a date is represented as a 64 bit number representing a timestamp +in milliseconds-since-the-epoch. These timestamps are returned as the bucket +++key++s. The `key_as_string` is the same timestamp converted to a formatted +date string using the format specified with the `format` parameter: + +TIP: If no `format` is specified, then it will use the first date +<> specified in the field mapping. + +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sales_over_time" : { + "auto_date_histogram" : { + "field" : "date", + "buckets" : 5, + "format" : "yyyy-MM-dd" <1> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +<1> Supports expressive date <> + +Response: + +[source,js] +-------------------------------------------------- +{ + ... + "aggregations": { + "sales_over_time": { + "buckets": [ + { + "key_as_string": "2015-01-01", + "key": 1420070400000, + "doc_count": 3 + }, + { + "key_as_string": "2015-02-01", + "key": 1422748800000, + "doc_count": 2 + }, + { + "key_as_string": "2015-03-01", + "key": 1425168000000, + "doc_count": 2 + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] + +=== Intervals + +The interval of the returned buckets is selected based on the data collected by the +aggregation so that the number of buckets returned is less than or equal to the number +requested. The possible intervals returned are: + +[horizontal] +seconds:: In multiples of 1, 5, 10 and 30 +minutes:: In multiples of 1, 5, 10 and 30 +hours:: In multiples of 1, 3 and 12 +days:: In multiples of 1, and 7 +months:: In multiples of 1, and 3 +years:: In multiples of 1, 5, 10, 20, 50 and 100 + +In the worst case, where the number of daily buckets are too many for the requested +number of buckets, the number of buckets returned will be 1/7th of the number of +buckets requested. 
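For a concrete feel (the numbers are illustrative, not taken from this change): with a target of 10 buckets over roughly 40 days of data, 1-day buckets would produce about 40 buckets, so the aggregation falls back to 7-day buckets and returns around six of them. A target of 10 over 11 days of data is just past the daily limit, so the response collapses to only two or three 7-day buckets, which is roughly the 1/7th worst case described above.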
+ +==== Time Zone + +Date-times are stored in Elasticsearch in UTC. By default, all bucketing and +rounding is also done in UTC. The `time_zone` parameter can be used to indicate +that bucketing should use a different time zone. + +Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or +`-08:00`) or as a timezone id, an identifier used in the TZ database like +`America/Los_Angeles`. + +Consider the following example: + +[source,js] +--------------------------------- +PUT my_index/log/1?refresh +{ + "date": "2015-10-01T00:30:00Z" +} + +PUT my_index/log/2?refresh +{ + "date": "2015-10-01T01:30:00Z" +} + +PUT my_index/log/3?refresh +{ + "date": "2015-10-01T02:30:00Z" +} + +GET my_index/_search?size=0 +{ + "aggs": { + "by_day": { + "auto_date_histogram": { + "field": "date", + "buckets" : 3 + } + } + } +} +--------------------------------- +// CONSOLE + +UTC is used if no time zone is specified, three 1-hour buckets are returned +starting at midnight UTC on 1 October 2015: + +[source,js] +--------------------------------- +{ + ... + "aggregations": { + "by_day": { + "buckets": [ + { + "key_as_string": "2015-10-01T00:00:00.000Z", + "key": 1443657600000, + "doc_count": 1 + }, + { + "key_as_string": "2015-10-01T01:00:00.000Z", + "key": 1443661200000, + "doc_count": 1 + }, + { + "key_as_string": "2015-10-01T02:00:00.000Z", + "key": 1443664800000, + "doc_count": 1 + } + ] + } + } +} +--------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] + +If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before +midnight UTC: + +[source,js] +--------------------------------- +GET my_index/_search?size=0 +{ + "aggs": { + "by_day": { + "auto_date_histogram": { + "field": "date", + "buckets" : 3, + "time_zone": "-01:00" + } + } + } +} +--------------------------------- +// CONSOLE +// TEST[continued] + + +Now three 1-hour buckets are still returned but the first bucket starts at +11:00pm on 30 September 2015 since that is the local time for the bucket in +the specified time zone. + +[source,js] +--------------------------------- +{ + ... + "aggregations": { + "by_day": { + "buckets": [ + { + "key_as_string": "2015-09-30T23:00:00.000-01:00", + "key": 1443657600000, + "doc_count": 1 + }, + { + "key_as_string": "2015-10-01T00:00:00.000-01:00", + "key": 1443661200000, + "doc_count": 1 + }, + { + "key_as_string": "2015-10-01T01:00:00.000-01:00", + "key": 1443664800000, + "doc_count": 1 + } + ] + } + } +} +--------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] + +<1> The `key_as_string` value represents midnight on each day + in the specified time zone. + +WARNING: When using time zones that follow DST (daylight savings time) changes, +buckets close to the moment when those changes happen can have slightly different +sizes than neighbouring buckets. +For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am, +clocks were turned forward 1 hour to 3am local time. If the result of the aggregation +was daily buckets, the bucket covering that day will only hold data for 23 hours +instead of the usual 24 hours for other buckets. The same is true for shorter intervals +like e.g. 12h. Here, we will have only a 11h bucket on the morning of 27 March when the +DST shift happens. 
+ +==== Scripts + +Like with the normal <>, both document level +scripts and value level scripts are supported. This aggregation does not however, support the `min_doc_count`, +`extended_bounds` and `order` parameters. + +==== Missing value + +The `missing` parameter defines how documents that are missing a value should be treated. +By default they will be ignored but it is also possible to treat them as if they +had a value. + +[source,js] +-------------------------------------------------- +POST /sales/_search?size=0 +{ + "aggs" : { + "sale_date" : { + "auto_date_histogram" : { + "field" : "date", + "buckets": 10, + "missing": "2000/01/01" <1> + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. + diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 8188b69d6c046..b00d91f260b8d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -109,8 +109,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing; @@ -396,6 +398,8 @@ private void registerAggregations(List plugins) { HistogramAggregationBuilder::parse).addResultReader(InternalHistogram::new)); registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder::new, DateHistogramAggregationBuilder::parse).addResultReader(InternalDateHistogram::new)); + registerAggregation(new AggregationSpec(AutoDateHistogramAggregationBuilder.NAME, AutoDateHistogramAggregationBuilder::new, + AutoDateHistogramAggregationBuilder::parse).addResultReader(InternalAutoDateHistogram::new)); registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new, GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new)); registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, GeoGridAggregationBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 504758e7a4ec2..7b09ac9d61895 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -84,6 +84,19 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do subCollector.collect(doc, bucketOrd); } + public final 
void mergeBuckets(long[] mergeMap, long newNumBuckets) { + try (IntArray oldDocCounts = docCounts) { + docCounts = bigArrays.newIntArray(newNumBuckets, true); + docCounts.fill(0, newNumBuckets, 0); + for (int i = 0; i < oldDocCounts.size(); i++) { + int docCount = oldDocCounts.get(i); + if (docCount != 0) { + docCounts.increment(mergeMap[i], docCount); + } + } + } + } + public IntArray getDocCounts() { return docCounts; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java new file mode 100644 index 0000000000000..f357e9d286f54 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.packed.PackedInts; +import org.apache.lucene.util.packed.PackedLongValues; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.BucketCollector; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * A specialization of {@link DeferringBucketCollector} that collects all + * matches and then is able to replay a given subset of buckets. Exposes + * mergeBuckets, which can be invoked by the aggregator when increasing the + * rounding interval. 
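+ *
+ * A rough usage sketch (following the pattern of the aggregator added in this
+ * change, not a general contract): the aggregator returns this collector from
+ * its getDeferringCollector() method and, whenever it remaps its bucket
+ * ordinals, builds a mergeMap of old ordinal to new ordinal and passes it to
+ * mergeBuckets(long[]), keeping the deferred doc/bucket pairs consistent with
+ * the new ordinals.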
+ */ +public class MergingBucketsDeferringCollector extends DeferringBucketCollector { + + List entries = new ArrayList<>(); + BucketCollector collector; + final SearchContext searchContext; + LeafReaderContext context; + PackedLongValues.Builder docDeltas; + PackedLongValues.Builder buckets; + long maxBucket = -1; + boolean finished = false; + LongHash selectedBuckets; + + public MergingBucketsDeferringCollector(SearchContext context) { + this.searchContext = context; + } + + @Override + public void setDeferredCollector(Iterable deferredCollectors) { + this.collector = BucketCollector.wrap(deferredCollectors); + } + + @Override + public boolean needsScores() { + if (collector == null) { + throw new IllegalStateException(); + } + return collector.needsScores(); + } + + @Override + public void preCollection() throws IOException { + collector.preCollection(); + } + + private void finishLeaf() { + if (context != null) { + entries.add(new Entry(context, docDeltas.build(), buckets.build())); + } + context = null; + docDeltas = null; + buckets = null; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + finishLeaf(); + + context = ctx; + docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + + return new LeafBucketCollector() { + int lastDoc = 0; + + @Override + public void collect(int doc, long bucket) { + docDeltas.add(doc - lastDoc); + buckets.add(bucket); + lastDoc = doc; + maxBucket = Math.max(maxBucket, bucket); + } + }; + } + + public void mergeBuckets(long[] mergeMap) { + + List newEntries = new ArrayList<>(entries.size()); + for (Entry sourceEntry : entries) { + PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + for (PackedLongValues.Iterator itr = sourceEntry.buckets.iterator(); itr.hasNext();) { + long bucket = itr.next(); + newBuckets.add(mergeMap[Math.toIntExact(bucket)]); + } + newEntries.add(new Entry(sourceEntry.context, sourceEntry.docDeltas, newBuckets.build())); + } + entries = newEntries; + + // if there are buckets that have been collected in the current segment + // we need to update the bucket ordinals there too + if (buckets.size() > 0) { + PackedLongValues currentBuckets = buckets.build(); + PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); + for (PackedLongValues.Iterator itr = currentBuckets.iterator(); itr.hasNext();) { + long bucket = itr.next(); + newBuckets.add(mergeMap[Math.toIntExact(bucket)]); + } + buckets = newBuckets; + } + } + + @Override + public void postCollection() { + finishLeaf(); + finished = true; + } + + /** + * Replay the wrapped collector, but only on a selection of buckets. + */ + @Override + public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { + if (finished == false) { + throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); + } + if (this.selectedBuckets != null) { + throw new IllegalStateException("Already been replayed"); + } + + final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); + for (long bucket : selectedBuckets) { + hash.add(bucket); + } + this.selectedBuckets = hash; + + boolean needsScores = collector.needsScores(); + Weight weight = null; + if (needsScores) { + weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true); + } + for (Entry entry : entries) { + final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); + DocIdSetIterator docIt = null; + if (needsScores && entry.docDeltas.size() > 0) { + Scorer scorer = weight.scorer(entry.context); + // We don't need to check if the scorer is null + // since we are sure that there are documents to replay + // (entry.docDeltas it not empty). + docIt = scorer.iterator(); + leafCollector.setScorer(scorer); + } + final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator(); + final PackedLongValues.Iterator buckets = entry.buckets.iterator(); + int doc = 0; + for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) { + doc += docDeltaIterator.next(); + final long bucket = buckets.next(); + final long rebasedBucket = hash.find(bucket); + if (rebasedBucket != -1) { + if (needsScores) { + if (docIt.docID() < doc) { + docIt.advance(doc); + } + // aggregations should only be replayed on matching + // documents + assert docIt.docID() == doc; + } + leafCollector.collect(doc, rebasedBucket); + } + } + } + + collector.postCollection(); + } + + /** + * Wrap the provided aggregator so that it behaves (almost) as if it had + * been collected directly. + */ + @Override + public Aggregator wrap(final Aggregator in) { + + return new WrappedAggregator(in) { + + @Override + public InternalAggregation buildAggregation(long bucket) throws IOException { + if (selectedBuckets == null) { + throw new IllegalStateException("Collection has not been replayed yet."); + } + final long rebasedBucket = selectedBuckets.find(bucket); + if (rebasedBucket == -1) { + throw new IllegalStateException("Cannot build for a bucket which has not been collected [" + bucket + "]"); + } + return in.buildAggregation(rebasedBucket); + } + + }; + } + + private static class Entry { + final LeafReaderContext context; + final PackedLongValues docDeltas; + final PackedLongValues buckets; + + Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) { + this.context = context; + this.docDeltas = docDeltas; + this.buckets = buckets; + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java new file mode 100644 index 0000000000000..366060835d891 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -0,0 +1,218 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService; +import org.elasticsearch.search.aggregations.support.ValueType; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; +import org.elasticsearch.search.aggregations.support.ValuesSourceType; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +public class AutoDateHistogramAggregationBuilder + extends ValuesSourceAggregationBuilder { + + public static final String NAME = "auto_date_histogram"; + + public static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets"); + + private static final ObjectParser PARSER; + static { + PARSER = new ObjectParser<>(AutoDateHistogramAggregationBuilder.NAME); + ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true); + + PARSER.declareInt(AutoDateHistogramAggregationBuilder::setNumBuckets, NUM_BUCKETS_FIELD); + } + + public static AutoDateHistogramAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { + return PARSER.parse(parser, new AutoDateHistogramAggregationBuilder(aggregationName), null); + } + + private int numBuckets = 10; + + /** Create a new builder with the given name. */ + public AutoDateHistogramAggregationBuilder(String name) { + super(name, ValuesSourceType.NUMERIC, ValueType.DATE); + } + + /** Read from a stream, for internal use only. 
*/ + public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { + super(in, ValuesSourceType.NUMERIC, ValueType.DATE); + numBuckets = in.readVInt(); + } + + protected AutoDateHistogramAggregationBuilder(AutoDateHistogramAggregationBuilder clone, Builder factoriesBuilder, + Map metaData) { + super(clone, factoriesBuilder, metaData); + this.numBuckets = clone.numBuckets; + } + + @Override + protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metaData) { + return new AutoDateHistogramAggregationBuilder(this, factoriesBuilder, metaData); + } + + @Override + protected void innerWriteTo(StreamOutput out) throws IOException { + out.writeVInt(numBuckets); + } + + @Override + public String getType() { + return NAME; + } + + public AutoDateHistogramAggregationBuilder setNumBuckets(int numBuckets) { + if (numBuckets <= 0) { + throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() + " must be greater than 0 for [" + name + "]"); + } + this.numBuckets = numBuckets; + return this; + } + + public int getNumBuckets() { + return numBuckets; + } + + @Override + protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, + AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { + RoundingInfo[] roundings = new RoundingInfo[6]; + roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE), 1000L, 1, 5, 10, 30); + roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR), 60 * 1000L, 1, 5, 10, 30); + roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY), 60 * 60 * 1000L, 1, 3, 12); + roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH), 24 * 60 * 60 * 1000L, 1, 7); + roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR), 30 * 24 * 60 * 60 * 1000L, 1, 3); + roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY), 365 * 24 * 60 * 60 * 1000L, 1, 5, 10, 20, 50, 100); + + int maxRoundingInterval = Arrays.stream(roundings,0, roundings.length-1) + .map(rounding -> rounding.innerIntervals) + .flatMapToInt(Arrays::stream) + .boxed() + .reduce(Integer::max).get(); + Settings settings = context.getQueryShardContext().getIndexSettings().getNodeSettings(); + int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings); + int bucketCeiling = maxBuckets / maxRoundingInterval; + if (numBuckets > bucketCeiling) { + throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName()+ + " must be less than " + bucketCeiling); + } + return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); + } + + private Rounding createRounding(DateTimeUnit interval) { + Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); + if (timeZone() != null) { + tzRoundingBuilder.timeZone(timeZone()); + } + Rounding rounding = tzRoundingBuilder.build(); + return rounding; + } + + @Override + protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.field(NUM_BUCKETS_FIELD.getPreferredName(), numBuckets); + return builder; + } + + @Override + protected int innerHashCode() { + return Objects.hash(numBuckets); + } + + @Override + protected boolean innerEquals(Object obj) { + AutoDateHistogramAggregationBuilder other = (AutoDateHistogramAggregationBuilder) obj; + return Objects.equals(numBuckets, other.numBuckets); + } + + public static class RoundingInfo implements 
Writeable { + final Rounding rounding; + final int[] innerIntervals; + final long roughEstimateDurationMillis; + + public RoundingInfo(Rounding rounding, long roughEstimateDurationMillis, int... innerIntervals) { + this.rounding = rounding; + this.roughEstimateDurationMillis = roughEstimateDurationMillis; + this.innerIntervals = innerIntervals; + } + + public RoundingInfo(StreamInput in) throws IOException { + rounding = Rounding.Streams.read(in); + roughEstimateDurationMillis = in.readVLong(); + innerIntervals = in.readIntArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Rounding.Streams.write(rounding, out); + out.writeVLong(roughEstimateDurationMillis); + out.writeIntArray(innerIntervals); + } + + public int getMaximumInnerInterval() { + return innerIntervals[innerIntervals.length - 1]; + } + + public long getRoughEstimateDurationMillis() { + return roughEstimateDurationMillis; + } + + @Override + public int hashCode() { + return Objects.hash(rounding, Arrays.hashCode(innerIntervals)); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RoundingInfo other = (RoundingInfo) obj; + return Objects.equals(rounding, other.rounding) && + Objects.deepEquals(innerIntervals, other.innerIntervals); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java new file mode 100644 index 0000000000000..f86145386f1df --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -0,0 +1,199 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.util.LongHash; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; +import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator; +import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.bucket.MergingBucketsDeferringCollector; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * An aggregator for date values. Every date is rounded down using a configured + * {@link Rounding}. + * + * @see Rounding + */ +class AutoDateHistogramAggregator extends DeferableBucketAggregator { + + private final ValuesSource.Numeric valuesSource; + private final DocValueFormat formatter; + private final RoundingInfo[] roundingInfos; + private int roundingIdx = 0; + + private LongHash bucketOrds; + private int targetBuckets; + private MergingBucketsDeferringCollector deferringCollector; + + AutoDateHistogramAggregator(String name, AggregatorFactories factories, int numBuckets, RoundingInfo[] roundingInfos, + @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent, + List pipelineAggregators, Map metaData) throws IOException { + + super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); + this.targetBuckets = numBuckets; + this.valuesSource = valuesSource; + this.formatter = formatter; + this.roundingInfos = roundingInfos; + + bucketOrds = new LongHash(1, aggregationContext.bigArrays()); + + } + + @Override + public boolean needsScores() { + return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); + } + + @Override + protected boolean shouldDefer(Aggregator aggregator) { + return true; + } + + @Override + public DeferringBucketCollector getDeferringCollector() { + deferringCollector = new MergingBucketsDeferringCollector(context); + return deferringCollector; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, + final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final SortedNumericDocValues values = valuesSource.longValues(ctx); + return new LeafBucketCollectorBase(sub, values) { + @Override + public void collect(int doc, long bucket) throws IOException { + assert bucket == 0; + if (values.advanceExact(doc)) { + final 
int valuesCount = values.docValueCount(); + + long previousRounded = Long.MIN_VALUE; + for (int i = 0; i < valuesCount; ++i) { + long value = values.nextValue(); + long rounded = roundingInfos[roundingIdx].rounding.round(value); + assert rounded >= previousRounded; + if (rounded == previousRounded) { + continue; + } + long bucketOrd = bucketOrds.add(rounded); + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + collectExistingBucket(sub, doc, bucketOrd); + } else { + collectBucket(sub, doc, bucketOrd); + while (roundingIdx < roundingInfos.length - 1 + && bucketOrds.size() > (targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval())) { + increaseRounding(); + } + } + previousRounded = rounded; + } + } + } + + private void increaseRounding() { + try (LongHash oldBucketOrds = bucketOrds) { + LongHash newBucketOrds = new LongHash(1, context.bigArrays()); + long[] mergeMap = new long[(int) oldBucketOrds.size()]; + Rounding newRounding = roundingInfos[++roundingIdx].rounding; + for (int i = 0; i < oldBucketOrds.size(); i++) { + long oldKey = oldBucketOrds.get(i); + long newKey = newRounding.round(oldKey); + long newBucketOrd = newBucketOrds.add(newKey); + if (newBucketOrd >= 0) { + mergeMap[i] = newBucketOrd; + } else { + mergeMap[i] = -1 - newBucketOrd; + } + } + mergeBuckets(mergeMap, newBucketOrds.size()); + if (deferringCollector != null) { + deferringCollector.mergeBuckets(mergeMap); + } + bucketOrds = newBucketOrds; + } + } + }; + } + + @Override + public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { + assert owningBucketOrdinal == 0; + consumeBucketsAndMaybeBreak((int) bucketOrds.size()); + + long[] bucketOrdArray = new long[(int) bucketOrds.size()]; + for (int i = 0; i < bucketOrds.size(); i++) { + bucketOrdArray[i] = i; + } + + runDeferredCollections(bucketOrdArray); + + List buckets = new ArrayList<>((int) bucketOrds.size()); + for (long i = 0; i < bucketOrds.size(); i++) { + buckets.add(new InternalAutoDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), formatter, bucketAggregations(i))); + } + + // the contract of the histogram aggregation is that shards must return + // buckets ordered by key in ascending order + CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); + + // value source will be null for unmapped fields + InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx, + buildEmptySubAggregations()); + + return new InternalAutoDateHistogram(name, buckets, targetBuckets, emptyBucketInfo, formatter, pipelineAggregators(), metaData()); + } + + @Override + public InternalAggregation buildEmptyAggregation() { + InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx, + buildEmptySubAggregations()); + return new InternalAutoDateHistogram(name, Collections.emptyList(), targetBuckets, emptyBucketInfo, formatter, + pipelineAggregators(), metaData()); + } + + @Override + public void doClose() { + Releasables.close(bucketOrds); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java new file mode 100644 index 0000000000000..051f2f9f6e7c7 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; +import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; +import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public final class AutoDateHistogramAggregatorFactory + extends ValuesSourceAggregatorFactory { + + private final int numBuckets; + private RoundingInfo[] roundingInfos; + + public AutoDateHistogramAggregatorFactory(String name, ValuesSourceConfig config, int numBuckets, RoundingInfo[] roundingInfos, + SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { + super(name, config, context, parent, subFactoriesBuilder, metaData); + this.numBuckets = numBuckets; + this.roundingInfos = roundingInfos; + } + + @Override + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, + List pipelineAggregators, Map metaData) throws IOException { + if (collectsFromSingleBucket == false) { + return asMultiBucketAggregator(this, context, parent); + } + return createAggregator(valuesSource, parent, pipelineAggregators, metaData); + } + + private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, + Map metaData) throws IOException { + return new AutoDateHistogramAggregator(name, factories, numBuckets, roundingInfos, valuesSource, config.format(), context, parent, + pipelineAggregators, + metaData); + } + + @Override + protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) + throws IOException { + return createAggregator(null, parent, pipelineAggregators, metaData); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 94dc18eae63e2..1114d30fef31e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -28,13 +28,13 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java new file mode 100644 index 0000000000000..27c195cbdae75 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -0,0 +1,601 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.KeyComparable; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.Map; +import java.util.Objects; + +/** + * Implementation of {@link Histogram}. + */ +public final class InternalAutoDateHistogram extends + InternalMultiBucketAggregation implements Histogram, HistogramFactory { + + public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { + + final long key; + final long docCount; + final InternalAggregations aggregations; + protected final transient DocValueFormat format; + + public Bucket(long key, long docCount, DocValueFormat format, + InternalAggregations aggregations) { + this.format = format; + this.key = key; + this.docCount = docCount; + this.aggregations = aggregations; + } + + /** + * Read from a stream. 
+ */ + public Bucket(StreamInput in, DocValueFormat format) throws IOException { + this.format = format; + key = in.readLong(); + docCount = in.readVLong(); + aggregations = InternalAggregations.readAggregations(in); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || obj.getClass() != InternalAutoDateHistogram.Bucket.class) { + return false; + } + InternalAutoDateHistogram.Bucket that = (InternalAutoDateHistogram.Bucket) obj; + // No need to take the keyed and format parameters into account, + // they are already stored and tested on the InternalDateHistogram object + return key == that.key + && docCount == that.docCount + && Objects.equals(aggregations, that.aggregations); + } + + @Override + public int hashCode() { + return Objects.hash(getClass(), key, docCount, aggregations); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(key); + out.writeVLong(docCount); + aggregations.writeTo(out); + } + + @Override + public String getKeyAsString() { + return format.format(key).toString(); + } + + @Override + public Object getKey() { + return new DateTime(key, DateTimeZone.UTC); + } + + @Override + public long getDocCount() { + return docCount; + } + + @Override + public Aggregations getAggregations() { + return aggregations; + } + + Bucket reduce(List buckets, Rounding rounding, ReduceContext context) { + List aggregations = new ArrayList<>(buckets.size()); + long docCount = 0; + for (Bucket bucket : buckets) { + docCount += bucket.docCount; + aggregations.add((InternalAggregations) bucket.getAggregations()); + } + InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); + return new InternalAutoDateHistogram.Bucket(rounding.round(key), docCount, format, aggs); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + String keyAsString = format.format(key).toString(); + builder.startObject(); + if (format != DocValueFormat.RAW) { + builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString); + } + builder.field(CommonFields.KEY.getPreferredName(), key); + builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); + aggregations.toXContentInternal(builder, params); + builder.endObject(); + return builder; + } + + @Override + public int compareKey(Bucket other) { + return Long.compare(key, other.key); + } + + public DocValueFormat getFormatter() { + return format; + } + } + + static class BucketInfo { + + final RoundingInfo[] roundingInfos; + final int roundingIdx; + final InternalAggregations emptySubAggregations; + + BucketInfo(RoundingInfo[] roundings, int roundingIdx, InternalAggregations subAggregations) { + this.roundingInfos = roundings; + this.roundingIdx = roundingIdx; + this.emptySubAggregations = subAggregations; + } + + BucketInfo(StreamInput in) throws IOException { + int size = in.readVInt(); + roundingInfos = new RoundingInfo[size]; + for (int i = 0; i < size; i++) { + roundingInfos[i] = new RoundingInfo(in); + } + roundingIdx = in.readVInt(); + emptySubAggregations = InternalAggregations.readAggregations(in); + } + + void writeTo(StreamOutput out) throws IOException { + out.writeVInt(roundingInfos.length); + for (RoundingInfo roundingInfo : roundingInfos) { + roundingInfo.writeTo(out); + } + out.writeVInt(roundingIdx); + emptySubAggregations.writeTo(out); + } + + @Override + public boolean equals(Object obj) { + if (obj == null || getClass() != obj.getClass()) { + return false; + } + BucketInfo that = 
(BucketInfo) obj; + return Objects.deepEquals(roundingInfos, that.roundingInfos) + && Objects.equals(roundingIdx, that.roundingIdx) + && Objects.equals(emptySubAggregations, that.emptySubAggregations); + } + + @Override + public int hashCode() { + return Objects.hash(getClass(), Arrays.hashCode(roundingInfos), roundingIdx, emptySubAggregations); + } + } + + private final List buckets; + private final DocValueFormat format; + private final BucketInfo bucketInfo; + private final int targetBuckets; + + + InternalAutoDateHistogram(String name, List buckets, int targetBuckets, BucketInfo emptyBucketInfo, DocValueFormat formatter, + List pipelineAggregators, Map metaData) { + super(name, pipelineAggregators, metaData); + this.buckets = buckets; + this.bucketInfo = emptyBucketInfo; + this.format = formatter; + this.targetBuckets = targetBuckets; + } + + /** + * Stream from a stream. + */ + public InternalAutoDateHistogram(StreamInput in) throws IOException { + super(in); + bucketInfo = new BucketInfo(in); + format = in.readNamedWriteable(DocValueFormat.class); + buckets = in.readList(stream -> new Bucket(stream, format)); + this.targetBuckets = in.readVInt(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + bucketInfo.writeTo(out); + out.writeNamedWriteable(format); + out.writeList(buckets); + out.writeVInt(targetBuckets); + } + + @Override + public String getWriteableName() { + return AutoDateHistogramAggregationBuilder.NAME; + } + + @Override + public List getBuckets() { + return Collections.unmodifiableList(buckets); + } + + DocValueFormat getFormatter() { + return format; + } + + public int getTargetBuckets() { + return targetBuckets; + } + + public BucketInfo getBucketInfo() { + return bucketInfo; + } + + @Override + public InternalAutoDateHistogram create(List buckets) { + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators(), metaData); + } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); + } + + private static class IteratorAndCurrent { + + private final Iterator iterator; + private Bucket current; + + IteratorAndCurrent(Iterator iterator) { + this.iterator = iterator; + current = iterator.next(); + } + + } + + /** + * This method works almost exactly the same as + * InternalDateHistogram#reduceBuckets(List, ReduceContext), the different + * here is that we need to round all the keys we see using the highest level + * rounding returned across all the shards so the resolution of the buckets + * is the same and they can be reduced together. 
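+ * For example, if one shard has already moved to hour-level rounding while
+ * another is still at minute-level rounding, every key is re-rounded with the
+ * hour-level (highest) rounding before the usual key-ordered merge, so the
+ * buckets from both shards line up on the same keys.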
+ */ + private BucketReduceResult reduceBuckets(List aggregations, ReduceContext reduceContext) { + + // First we need to find the highest level rounding used across all the + // shards + int reduceRoundingIdx = 0; + for (InternalAggregation aggregation : aggregations) { + int aggRoundingIdx = ((InternalAutoDateHistogram) aggregation).bucketInfo.roundingIdx; + if (aggRoundingIdx > reduceRoundingIdx) { + reduceRoundingIdx = aggRoundingIdx; + } + } + // This rounding will be used to reduce all the buckets + RoundingInfo reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx]; + Rounding reduceRounding = reduceRoundingInfo.rounding; + + final PriorityQueue pq = new PriorityQueue(aggregations.size()) { + @Override + protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { + return a.current.key < b.current.key; + } + }; + for (InternalAggregation aggregation : aggregations) { + InternalAutoDateHistogram histogram = (InternalAutoDateHistogram) aggregation; + if (histogram.buckets.isEmpty() == false) { + pq.add(new IteratorAndCurrent(histogram.buckets.iterator())); + } + } + + List reducedBuckets = new ArrayList<>(); + if (pq.size() > 0) { + // list of buckets coming from different shards that have the same key + List currentBuckets = new ArrayList<>(); + double key = reduceRounding.round(pq.top().current.key); + + do { + final IteratorAndCurrent top = pq.top(); + + if (reduceRounding.round(top.current.key) != key) { + // the key changes, reduce what we already buffered and reset the buffer for current buckets + final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); + reduceContext.consumeBucketsAndMaybeBreak(1); + reducedBuckets.add(reduced); + currentBuckets.clear(); + key = reduceRounding.round(top.current.key); + } + + currentBuckets.add(top.current); + + if (top.iterator.hasNext()) { + final Bucket next = top.iterator.next(); + assert next.key > top.current.key : "shards must return data sorted by key"; + top.current = next; + pq.updateTop(); + } else { + pq.pop(); + } + } while (pq.size() > 0); + + if (currentBuckets.isEmpty() == false) { + final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); + reduceContext.consumeBucketsAndMaybeBreak(1); + reducedBuckets.add(reduced); + } + } + + return mergeBucketsIfNeeded(reducedBuckets, reduceRoundingIdx, reduceRoundingInfo, reduceContext); + } + + private BucketReduceResult mergeBucketsIfNeeded(List reducedBuckets, int reduceRoundingIdx, RoundingInfo reduceRoundingInfo, + ReduceContext reduceContext) { + while (reducedBuckets.size() > (targetBuckets * reduceRoundingInfo.getMaximumInnerInterval()) + && reduceRoundingIdx < bucketInfo.roundingInfos.length - 1) { + reduceRoundingIdx++; + reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx]; + reducedBuckets = mergeBuckets(reducedBuckets, reduceRoundingInfo.rounding, reduceContext); + } + return new BucketReduceResult(reducedBuckets, reduceRoundingInfo, reduceRoundingIdx); + } + + private List mergeBuckets(List reducedBuckets, Rounding reduceRounding, ReduceContext reduceContext) { + List mergedBuckets = new ArrayList<>(); + + List sameKeyedBuckets = new ArrayList<>(); + double key = Double.NaN; + for (Bucket bucket : reducedBuckets) { + long roundedBucketKey = reduceRounding.round(bucket.key); + if (Double.isNaN(key)) { + key = roundedBucketKey; + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); + sameKeyedBuckets.add(createBucket(key, bucket.docCount, 
bucket.aggregations)); + } else if (roundedBucketKey == key) { + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); + sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + } else { + reduceContext.consumeBucketsAndMaybeBreak(1); + mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); + sameKeyedBuckets.clear(); + key = roundedBucketKey; + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); + sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + } + } + if (sameKeyedBuckets.isEmpty() == false) { + reduceContext.consumeBucketsAndMaybeBreak(1); + mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); + } + reducedBuckets = mergedBuckets; + return reducedBuckets; + } + + private static class BucketReduceResult { + List buckets; + RoundingInfo roundingInfo; + int roundingIdx; + + BucketReduceResult(List buckets, RoundingInfo roundingInfo, int roundingIdx) { + this.buckets = buckets; + this.roundingInfo = roundingInfo; + this.roundingIdx = roundingIdx; + + } + } + + private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, ReduceContext reduceContext) { + List list = currentResult.buckets; + if (list.isEmpty()) { + return currentResult; + } + int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx, + bucketInfo.roundingInfos); + RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx]; + Rounding rounding = roundingInfo.rounding; + // merge buckets using the new rounding + list = mergeBuckets(list, rounding, reduceContext); + + Bucket lastBucket = null; + ListIterator iter = list.listIterator(); + InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(bucketInfo.emptySubAggregations), + reduceContext); + + // Add the empty buckets within the data, + // e.g. 
if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6 + while (iter.hasNext()) { + Bucket nextBucket = list.get(iter.nextIndex()); + if (lastBucket != null) { + long key = rounding.nextRoundingValue(lastBucket.key); + while (key < nextBucket.key) { + reduceContext.consumeBucketsAndMaybeBreak(1); + iter.add(new InternalAutoDateHistogram.Bucket(key, 0, format, reducedEmptySubAggs)); + key = rounding.nextRoundingValue(key); + } + assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key; + } + lastBucket = iter.next(); + } + return new BucketReduceResult(list, roundingInfo, roundingIdx); + } + + private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, RoundingInfo[] roundings) { + if (roundingIdx == roundings.length - 1) { + return roundingIdx; + } + int currentRoundingIdx = roundingIdx; + + // Getting the accurate number of required buckets can be slow for large + // ranges at low roundings so get a rough estimate of the rounding first + // so we are at most 1 away from the correct rounding and then get the + // accurate rounding value + for (int i = currentRoundingIdx + 1; i < roundings.length; i++) { + long dataDuration = maxKey - minKey; + long roughEstimateRequiredBuckets = dataDuration / roundings[i].getRoughEstimateDurationMillis(); + if (roughEstimateRequiredBuckets < targetBuckets * roundings[i].getMaximumInnerInterval()) { + currentRoundingIdx = i - 1; + break; + } else if (i == roundingIdx - 1) { + currentRoundingIdx = i; + break; + } + } + + int requiredBuckets = 0; + do { + Rounding currentRounding = roundings[currentRoundingIdx].rounding; + long currentKey = minKey; + requiredBuckets = 0; + while (currentKey < maxKey) { + requiredBuckets++; + currentKey = currentRounding.nextRoundingValue(currentKey); + } + currentRoundingIdx++; + } while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval()) + && currentRoundingIdx < roundings.length); + // The loop will increase past the correct rounding index here so we + // need to subtract one to get the rounding index we need + return currentRoundingIdx - 1; + } + + @Override + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { + BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext); + + if (reduceContext.isFinalReduce()) { + // adding empty buckets if needed + reducedBucketsResult = addEmptyBuckets(reducedBucketsResult, reduceContext); + + // Adding empty buckets may have tipped us over the target so merge the buckets again if needed + reducedBucketsResult = mergeBucketsIfNeeded(reducedBucketsResult.buckets, reducedBucketsResult.roundingIdx, + reducedBucketsResult.roundingInfo, reduceContext); + + // Now finally see if we need to merge consecutive buckets together to make a coarser interval at the same rounding + reducedBucketsResult = maybeMergeConsecutiveBuckets(reducedBucketsResult, reduceContext); + } + + BucketInfo bucketInfo = new BucketInfo(this.bucketInfo.roundingInfos, reducedBucketsResult.roundingIdx, + this.bucketInfo.emptySubAggregations); + + return new InternalAutoDateHistogram(getName(), reducedBucketsResult.buckets, targetBuckets, bucketInfo, format, + pipelineAggregators(), getMetaData()); + } + + private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult, ReduceContext reduceContext) { + List buckets = reducedBucketsResult.buckets; + RoundingInfo roundingInfo = reducedBucketsResult.roundingInfo; + int 
roundingIdx = reducedBucketsResult.roundingIdx; + if (buckets.size() > targetBuckets) { + for (int interval : roundingInfo.innerIntervals) { + int resultingBuckets = buckets.size() / interval; + if (resultingBuckets <= targetBuckets) { + return mergeConsecutiveBuckets(buckets, interval, roundingIdx, roundingInfo, reduceContext); + } + } + } + return reducedBucketsResult; + } + + private BucketReduceResult mergeConsecutiveBuckets(List reducedBuckets, int mergeInterval, int roundingIdx, + RoundingInfo roundingInfo, ReduceContext reduceContext) { + List mergedBuckets = new ArrayList<>(); + List sameKeyedBuckets = new ArrayList<>(); + + double key = roundingInfo.rounding.round(reducedBuckets.get(0).key); + for (int i = 0; i < reducedBuckets.size(); i++) { + Bucket bucket = reducedBuckets.get(i); + if (i % mergeInterval == 0 && sameKeyedBuckets.isEmpty() == false) { + reduceContext.consumeBucketsAndMaybeBreak(1); + mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); + sameKeyedBuckets.clear(); + key = roundingInfo.rounding.round(bucket.key); + } + reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); + sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); + } + if (sameKeyedBuckets.isEmpty() == false) { + reduceContext.consumeBucketsAndMaybeBreak(1); + mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); + } + return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + builder.startArray(CommonFields.BUCKETS.getPreferredName()); + for (Bucket bucket : buckets) { + bucket.toXContent(builder, params); + } + builder.endArray(); + return builder; + } + + // HistogramFactory method impls + + @Override + public Number getKey(MultiBucketsAggregation.Bucket bucket) { + return ((Bucket) bucket).key; + } + + @Override + public Number nextKey(Number key) { + return bucketInfo.roundingInfos[bucketInfo.roundingIdx].rounding.nextRoundingValue(key.longValue()); + } + + @Override + public InternalAggregation createAggregation(List buckets) { + // convert buckets to the right type + List buckets2 = new ArrayList<>(buckets.size()); + for (Object b : buckets) { + buckets2.add((Bucket) b); + } + buckets2 = Collections.unmodifiableList(buckets2); + return new InternalAutoDateHistogram(name, buckets2, targetBuckets, bucketInfo, format, pipelineAggregators(), getMetaData()); + } + + @Override + public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) { + return new Bucket(key.longValue(), docCount, format, aggregations); + } + + @Override + protected boolean doEquals(Object obj) { + InternalAutoDateHistogram that = (InternalAutoDateHistogram) obj; + return Objects.equals(buckets, that.buckets) + && Objects.equals(format, that.format) + && Objects.equals(bucketInfo, that.bucketInfo); + } + + @Override + protected int doHashCode() { + return Objects.hash(buckets, format, bucketInfo); + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 84dec2c983e28..669bda5574d31 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -424,7 +424,7 @@ private void addEmptyBuckets(List list, ReduceContext reduceContext) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); key = nextKey(key).longValue(); } - assert key == nextBucket.key; + assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key; } lastBucket = iter.next(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java new file mode 100644 index 0000000000000..caca44f9f2ea7 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; + +import java.io.IOException; +import java.util.List; + +public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation implements Histogram { + + @Override + public String getType() { + return AutoDateHistogramAggregationBuilder.NAME; + } + + @Override + public List getBuckets() { + return buckets; + } + + private static ObjectParser PARSER = + new ObjectParser<>(ParsedAutoDateHistogram.class.getSimpleName(), true, ParsedAutoDateHistogram::new); + static { + declareMultiBucketAggregationFields(PARSER, + parser -> ParsedBucket.fromXContent(parser, false), + parser -> ParsedBucket.fromXContent(parser, true)); + } + + public static ParsedAutoDateHistogram fromXContent(XContentParser parser, String name) throws IOException { + ParsedAutoDateHistogram aggregation = PARSER.parse(parser, null); + aggregation.setName(name); + return aggregation; + } + + public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket { + + private Long key; + + @Override + public Object getKey() { + if (key != null) { + return new DateTime(key, DateTimeZone.UTC); + } + return null; + } + + @Override + public String getKeyAsString() { + String keyAsString = super.getKeyAsString(); + if (keyAsString != null) { + return keyAsString; + } + if (key != null) { + return Long.toString(key); + } + return null; + } + + @Override + protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { + return 
builder.field(CommonFields.KEY.getPreferredName(), key); + } + + static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException { + return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index 29d8e327d5cd7..e381240ca1f61 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGridTests; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogramTests; import org.elasticsearch.search.aggregations.bucket.missing.InternalMissingTests; @@ -125,6 +126,7 @@ private static List getAggsTests() { aggsTests.add(new InternalGeoCentroidTests()); aggsTests.add(new InternalHistogramTests()); aggsTests.add(new InternalDateHistogramTests()); + aggsTests.add(new InternalAutoDateHistogramTests()); aggsTests.add(new LongTermsTests()); aggsTests.add(new DoubleTermsTests()); aggsTests.add(new StringTermsTests()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java new file mode 100644 index 0000000000000..3a10edf183376 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket; + +import org.elasticsearch.search.aggregations.BaseAggregationTestCase; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; + +public class AutoDateHistogramTests extends BaseAggregationTestCase { + + @Override + protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() { + AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(randomAlphaOfLengthBetween(1, 10)); + builder.field(INT_FIELD_NAME); + builder.setNumBuckets(randomIntBetween(1, 100000)); + if (randomBoolean()) { + builder.format("###.##"); + } + if (randomBoolean()) { + builder.missing(randomIntBetween(0, 10)); + } + if (randomBoolean()) { + builder.timeZone(randomDateTimeZone()); + } + return builder; + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java new file mode 100644 index 0000000000000..7cf29e3aa9cc5 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -0,0 +1,1332 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.MultiBucketConsumerService; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.hamcrest.Matchers; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.chrono.ISOChronology; +import org.joda.time.format.DateTimeFormat; +import org.joda.time.format.DateTimeFormatter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Consumer; + +import static org.hamcrest.Matchers.containsString; + +public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { + + private static final String DATE_FIELD = "date"; + private static final String INSTANT_FIELD = "instant"; + + private static final List dataset = Arrays.asList( + "2010-03-12T01:07:45", + "2010-04-27T03:43:34", + "2012-05-18T04:11:00", + "2013-05-29T05:11:31", + "2013-10-31T08:24:05", + "2015-02-13T13:09:32", + "2015-06-24T13:47:43", + "2015-11-13T16:14:34", + "2016-03-04T17:09:50", + "2017-12-12T22:55:46"); + + public void testMatchNoDocs() throws IOException { + testBothCases(new MatchNoDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + } + + public void testMatchAllDocs() throws IOException { + Query query = new MatchAllDocsQuery(); + + testSearchCase(query, dataset, + aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), + histogram -> assertEquals(10, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dataset, + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), + histogram -> assertEquals(8, histogram.getBuckets().size()) + ); + } + + public void testSubAggregations() throws IOException { + Query query = new MatchAllDocsQuery(); + testSearchAndReduceCase(query, dataset, + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) + .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2010-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + Stats stats = bucket.getAggregations().get("stats"); + assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); + assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); + assertEquals(2L, stats.getCount()); + + bucket = 
buckets.get(1); + assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertTrue(Double.isInfinite(stats.getMin())); + assertTrue(Double.isInfinite(stats.getMax())); + assertEquals(0L, stats.getCount()); + + bucket = buckets.get(2); + assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); + assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + + bucket = buckets.get(3); + assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); + assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); + assertEquals(2L, stats.getCount()); + + bucket = buckets.get(4); + assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertTrue(Double.isInfinite(stats.getMin())); + assertTrue(Double.isInfinite(stats.getMax())); + assertEquals(0L, stats.getCount()); + + bucket = buckets.get(5); + assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); + assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); + assertEquals(3L, stats.getCount()); + + bucket = buckets.get(6); + assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2016-03-04T17:09:50.000Z", stats.getMinAsString()); + assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + + bucket = buckets.get(7); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); + assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + }); + } + + public void testNoDocs() throws IOException { + Query query = new MatchNoDocsQuery(); + List dates = Collections.emptyList(); + Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); + + testSearchCase(query, dates, aggregation, + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + testSearchAndReduceCase(query, dates, aggregation, + histogram -> assertNull(histogram) + ); + } + + public void testAggregateWrongField() throws IOException { + testBothCases(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), + histogram -> assertEquals(0, histogram.getBuckets().size()) + ); + } + + public void testIntervalYear() throws IOException { + testSearchCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2015-02-13T13:09:32.000Z", 
bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2015-06-24T13:47:43.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2015-11-13T16:14:34.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2016-03-04T17:09:50.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-12-12T22:55:46.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testSearchAndReduceCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + } + + public void testIntervalMonth() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(6, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-03-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-03-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-03-06T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testWithLargeNumberOfBuckets() { + Query query = new MatchAllDocsQuery(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> testSearchCase(query, dataset, + aggregation 
-> aggregation.setNumBuckets(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS+1).field(DATE_FIELD), + // since an exception is thrown, this assertion won't be invoked. + histogram -> assertTrue(false) + )); + assertThat(exception.getMessage(), containsString("must be less than")); + } + + public void testIntervalDay() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(4, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01", + "2017-02-02", + "2017-02-02", + "2017-02-03", + "2017-02-03", + "2017-02-03", + "2017-02-05" + ), + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + } + + public void testIntervalDayWithTZ() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(4, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-01-31T23:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T23:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-02T23:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-04T23:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), + aggregation -> 
aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-01-31T00:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-02T00:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-03T00:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-04T00:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + }); + } + + public void testIntervalHour() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(10, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(8); + assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(9); + assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:00:00.000Z", 
bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T10:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T11:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T13:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T14:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testIntervalHourWithTZ() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(10, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T08:02:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T08:35:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T09:15:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T12:06:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T13:04:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T13:05:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T14:59:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T15:06:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(8); + assertEquals("2017-02-01T15:48:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(9); + assertEquals("2017-02-01T15:59:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> 
aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T08:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T10:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T11:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T12:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T13:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T14:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T15:00:00.000-01:00", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + public void testAllSecondIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 600; i++) { + DateTime date = startDate.plusSeconds(i); + dataset.add(format.print(date)); + } + + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(600, buckets.size()); + for (int i = 0; i < 600; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusSeconds(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(120, buckets.size()); + for (int i = 0; i < 120; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusSeconds(i * 5), bucket.getKey()); + assertEquals(5, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(60, buckets.size()); + for (int i = 0; i < 60; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusSeconds(i * 10), bucket.getKey()); + assertEquals(10, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(20, buckets.size()); + for (int i = 0; i < 20; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusSeconds(i * 30), bucket.getKey()); + assertEquals(30, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), + histogram 
-> { + List buckets = histogram.getBuckets(); + assertEquals(10, buckets.size()); + for (int i = 0; i < 10; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMinutes(i), bucket.getKey()); + assertEquals(60, bucket.getDocCount()); + } + }); + } + + public void testAllMinuteIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 600; i++) { + DateTime date = startDate.plusMinutes(i); + dataset.add(format.print(date)); + } + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(600, buckets.size()); + for (int i = 0; i < 600; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMinutes(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(120, buckets.size()); + for (int i = 0; i < 120; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMinutes(i * 5), bucket.getKey()); + assertEquals(5, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(60, buckets.size()); + for (int i = 0; i < 60; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMinutes(i * 10), bucket.getKey()); + assertEquals(10, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(20, buckets.size()); + for (int i = 0; i < 20; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMinutes(i * 30), bucket.getKey()); + assertEquals(30, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(10, buckets.size()); + for (int i = 0; i < 10; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusHours(i), bucket.getKey()); + assertEquals(60, bucket.getDocCount()); + } + }); + } + + public void testAllHourIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 600; i++) { + DateTime date = startDate.plusHours(i); + dataset.add(format.print(date)); + } + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(600, buckets.size()); + for (int i = 0; i < 600; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusHours(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + 
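+        // With 600 hourly documents a target of 300 buckets no longer fits the 1-hour rounding,
+        // so the aggregator is expected to step up to the coarser 3-hour interval; the next case
+        // asserts exactly that: 200 buckets of 3 documents each.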
testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(200, buckets.size()); + for (int i = 0; i < 200; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusHours(i * 3), bucket.getKey()); + assertEquals(3, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(50, buckets.size()); + for (int i = 0; i < 50; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusHours(i * 12), bucket.getKey()); + assertEquals(12, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(25, buckets.size()); + for (int i = 0; i < 25; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusDays(i), bucket.getKey()); + assertEquals(24, bucket.getDocCount()); + } + }); + } + + public void testAllDayIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 700; i++) { + DateTime date = startDate.plusDays(i); + dataset.add(format.print(date)); + } + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(700).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(700, buckets.size()); + for (int i = 0; i < 700; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusDays(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(100, buckets.size()); + for (int i = 0; i < 100; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusDays(i * 7), bucket.getKey()); + assertEquals(7, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(24, buckets.size()); + for (int i = 0; i < 24; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMonths(i), bucket.getKey()); + assertThat(bucket.getDocCount(), Matchers.lessThanOrEqualTo(31L)); + } + }); + } + + public void testAllMonthIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 600; i++) { + DateTime date = startDate.plusMonths(i); + dataset.add(format.print(date)); + } + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(600, buckets.size()); + for (int i = 0; i < 600; i++) { + Histogram.Bucket bucket = 
buckets.get(i); + assertEquals(startDate.plusMonths(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(200, buckets.size()); + for (int i = 0; i < 200; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusMonths(i * 3), bucket.getKey()); + assertEquals(3, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, + aggregation -> aggregation.setNumBuckets(60).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(50, buckets.size()); + for (int i = 0; i < 50; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i), bucket.getKey()); + assertEquals(12, bucket.getDocCount()); + } + }); + } + + public void testAllYearIntervals() throws IOException { + DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); + List dataset = new ArrayList<>(); + DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); + for (int i = 0; i < 600; i++) { + DateTime date = startDate.plusYears(i); + dataset.add(format.print(date)); + } + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(600, buckets.size()); + for (int i = 0; i < 600; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(120, buckets.size()); + for (int i = 0; i < 120; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i * 5), bucket.getKey()); + assertEquals(5, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(60, buckets.size()); + for (int i = 0; i < 60; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i * 10), bucket.getKey()); + assertEquals(10, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(30, buckets.size()); + for (int i = 0; i < 30; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i * 20), bucket.getKey()); + assertEquals(20, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(20).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(12, buckets.size()); + for (int i = 0; i < 12; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i * 50), bucket.getKey()); + assertEquals(50, bucket.getDocCount()); + } + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), + histogram -> { + List buckets = 
histogram.getBuckets(); + assertEquals(6, buckets.size()); + for (int i = 0; i < 6; i++) { + Histogram.Bucket bucket = buckets.get(i); + assertEquals(startDate.plusYears(i * 100), bucket.getKey()); + assertEquals(100, bucket.getDocCount()); + } + }); + } + + public void testInterval3Hour() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(10, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(8); + assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(9); + assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:00.000Z", + "2017-02-01T09:35:00.000Z", + "2017-02-01T10:15:00.000Z", + "2017-02-01T13:06:00.000Z", + "2017-02-01T14:04:00.000Z", + "2017-02-01T14:05:00.000Z", + "2017-02-01T15:59:00.000Z", + "2017-02-01T16:06:00.000Z", + "2017-02-01T16:48:00.000Z", + "2017-02-01T16:59:00.000Z" + ), + aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); + assertEquals(4, bucket.getDocCount()); + } + ); + } + + public void testIntervalMinute() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:35.000Z", + "2017-02-01T09:02:59.000Z", + "2017-02-01T09:15:37.000Z", + "2017-02-01T09:16:04.000Z", + "2017-02-01T09:16:42.000Z" + ), + aggregation -> 
aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:35.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:02:59.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T09:15:37.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T09:16:04.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T09:16:42.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + } + ); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T09:02:35.000Z", + "2017-02-01T09:02:59.000Z", + "2017-02-01T09:15:37.000Z", + "2017-02-01T09:16:04.000Z", + "2017-02-01T09:16:42.000Z" + ), + aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(15, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T09:03:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T09:04:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T09:05:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T09:06:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T09:07:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T09:08:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(7); + assertEquals("2017-02-01T09:09:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(8); + assertEquals("2017-02-01T09:10:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(9); + assertEquals("2017-02-01T09:11:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(10); + assertEquals("2017-02-01T09:12:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(11); + assertEquals("2017-02-01T09:13:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(12); + assertEquals("2017-02-01T09:14:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(13); + assertEquals("2017-02-01T09:15:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(14); + assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + } + ); + } + + public void testIntervalSecond() throws IOException { + testSearchCase(new MatchAllDocsQuery(), + Arrays.asList("2017-02-01T00:00:05.015Z", "2017-02-01T00:00:07.299Z", 
"2017-02-01T00:00:07.074Z", + "2017-02-01T00:00:11.688Z", "2017-02-01T00:00:11.210Z", "2017-02-01T00:00:11.380Z"), + aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(3, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + }); + testSearchAndReduceCase(new MatchAllDocsQuery(), + Arrays.asList( + "2017-02-01T00:00:05.015Z", + "2017-02-01T00:00:07.299Z", + "2017-02-01T00:00:07.074Z", + "2017-02-01T00:00:11.688Z", + "2017-02-01T00:00:11.210Z", + "2017-02-01T00:00:11.380Z" + ), + aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), + histogram -> { + List buckets = histogram.getBuckets(); + assertEquals(7, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + + bucket = buckets.get(1); + assertEquals("2017-02-01T00:00:06.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(2); + assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + + bucket = buckets.get(3); + assertEquals("2017-02-01T00:00:08.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(4); + assertEquals("2017-02-01T00:00:09.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(5); + assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + + bucket = buckets.get(6); + assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + } + ); + } + + private void testSearchCase(Query query, List dataset, + Consumer configure, + Consumer verify) throws IOException { + executeTestCase(false, query, dataset, configure, verify); + } + + private void testSearchAndReduceCase(Query query, List dataset, + Consumer configure, + Consumer verify) throws IOException { + executeTestCase(true, query, dataset, configure, verify); + } + + private void testBothCases(Query query, List dataset, + Consumer configure, + Consumer verify) throws IOException { + testSearchCase(query, dataset, configure, verify); + testSearchAndReduceCase(query, dataset, configure, verify); + } + + @Override + protected IndexSettings createIndexSettings() { + Settings nodeSettings = Settings.builder() + .put("search.max_buckets", 100000).build(); + return new IndexSettings( + IndexMetaData.builder("_index").settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .creationDate(System.currentTimeMillis()) + .build(), + nodeSettings + ); + } + + private void executeTestCase(boolean reduced, Query query, List dataset, + Consumer configure, + Consumer verify) throws IOException { + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + Document document = new Document(); + for (String date : dataset) { + if (frequently()) { + 
indexWriter.commit(); + } + + long instant = asLong(date); + document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); + document.add(new LongPoint(INSTANT_FIELD, instant)); + indexWriter.addDocument(document); + document.clear(); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + AutoDateHistogramAggregationBuilder aggregationBuilder = new AutoDateHistogramAggregationBuilder("_name"); + if (configure != null) { + configure.accept(aggregationBuilder); + } + + DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); + DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(aggregationBuilder.field()); + + InternalAutoDateHistogram histogram; + if (reduced) { + histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType); + } else { + histogram = search(indexSearcher, query, aggregationBuilder, fieldType); + } + verify.accept(histogram); + } + } + } + + private static long asLong(String dateTime) { + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java new file mode 100644 index 0000000000000..389371efd79aa --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.bucket.histogram; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; +import org.joda.time.DateTime; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; + +import static org.elasticsearch.common.unit.TimeValue.timeValueHours; +import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; +import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; + +public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase { + + private DocValueFormat format; + private RoundingInfo[] roundingInfos; + + @Override + public void setUp() throws Exception { + super.setUp(); + format = randomNumericDocValueFormat(); + + roundingInfos = new RoundingInfo[6]; + roundingInfos[0] = new RoundingInfo(Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(), 1, 5, 10, 30); + roundingInfos[1] = new RoundingInfo(Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(), 1, 5, 10, 30); + roundingInfos[2] = new RoundingInfo(Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(), 1, 3, 12); + roundingInfos[3] = new RoundingInfo(Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(), 1, 7); + roundingInfos[4] = new RoundingInfo(Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(), 1, 3); + roundingInfos[5] = new RoundingInfo(Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(), 1, 10, 20, 50, 100); + } + + @Override + protected InternalAutoDateHistogram createTestInstance(String name, + List pipelineAggregators, + Map metaData, + InternalAggregations aggregations) { + int nbBuckets = randomNumberOfBuckets(); + int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1); + List buckets = new ArrayList<>(nbBuckets); + long startingDate = System.currentTimeMillis(); + + long interval = randomIntBetween(1, 3); + long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis(); + + for (int i = 0; i < nbBuckets; i++) { + long key = startingDate + (intervalMillis * i); + buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations)); + } + InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList()); + BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations); + + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + } + + @Override + protected void assertReduced(InternalAutoDateHistogram reduced, List inputs) { + int roundingIdx = 0; + for (InternalAutoDateHistogram histogram : inputs) { + if (histogram.getBucketInfo().roundingIdx > roundingIdx) { + roundingIdx = histogram.getBucketInfo().roundingIdx; + } + } + Map 
expectedCounts = new TreeMap<>(); + for (Histogram histogram : inputs) { + for (Histogram.Bucket bucket : histogram.getBuckets()) { + expectedCounts.compute(roundingInfos[roundingIdx].rounding.round(((DateTime) bucket.getKey()).getMillis()), + (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); + } + } + Map actualCounts = new TreeMap<>(); + for (Histogram.Bucket bucket : reduced.getBuckets()) { + actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), + (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); + } + assertEquals(expectedCounts, actualCounts); + } + + @Override + protected Writeable.Reader instanceReader() { + return InternalAutoDateHistogram::new; + } + + @Override + protected Class implementationClass() { + return ParsedAutoDateHistogram.class; + } + + @Override + protected InternalAutoDateHistogram mutateInstance(InternalAutoDateHistogram instance) { + String name = instance.getName(); + List buckets = instance.getBuckets(); + int targetBuckets = instance.getTargetBuckets(); + BucketInfo bucketInfo = instance.getBucketInfo(); + List pipelineAggregators = instance.pipelineAggregators(); + Map metaData = instance.getMetaData(); + switch (between(0, 3)) { + case 0: + name += randomAlphaOfLength(5); + break; + case 1: + buckets = new ArrayList<>(buckets); + buckets.add(new InternalAutoDateHistogram.Bucket(randomNonNegativeLong(), randomIntBetween(1, 100), format, + InternalAggregations.EMPTY)); + break; + case 2: + int roundingIdx = bucketInfo.roundingIdx == bucketInfo.roundingInfos.length - 1 ? 0 : bucketInfo.roundingIdx + 1; + bucketInfo = new BucketInfo(bucketInfo.roundingInfos, roundingIdx, bucketInfo.emptySubAggregations); + break; + case 3: + if (metaData == null) { + metaData = new HashMap<>(1); + } else { + metaData = new HashMap<>(instance.getMetaData()); + } + metaData.put(randomAlphaOfLength(15), randomInt()); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 67eba5281d9b4..612c1342cc5d3 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -85,7 +85,6 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; /** * Base class for testing {@link Aggregator} implementations. @@ -227,7 +226,7 @@ public boolean shouldCache(Query query) throws IOException { }); when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class))); doAnswer(invocation -> { - /* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't + /* Store the release-ables so we can release them at the end of the test case. This is important because aggregations don't * close their sub-aggregations. This is fairly similar to what the production code does. 
*/ releasables.add((Releasable) invocation.getArguments()[0]); return null; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 838b0e315ea0e..15e44853a97ba 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -53,8 +53,10 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; +import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; @@ -181,6 +183,7 @@ public abstract class InternalAggregationTestCase map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); + map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java index 952b6c027945b..6f0aebe23966b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java @@ -149,7 +149,8 @@ private void assertMultiBucketsAggregations(Aggregation expected, Aggregation ac protected void assertMultiBucketsAggregation(MultiBucketsAggregation expected, MultiBucketsAggregation actual, boolean checkOrder) { Class parsedClass = implementationClass(); assertNotNull("Parsed aggregation class must not be null", parsedClass); - assertTrue(parsedClass.isInstance(actual)); + assertTrue("Unexpected parsed class, expected instance of: " + actual + ", but was: " + parsedClass, + parsedClass.isInstance(actual)); assertTrue(expected instanceof InternalAggregation); assertEquals(expected.getName(), actual.getName()); From 414ac81154c0f7831150d704b5c3db2533694d99 Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Fri, 13 Jul 2018 14:59:11 -0400 Subject: [PATCH 10/13] fix typo --- .../aggregations/bucket/autodatehistogram-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc index 28cb65ce6cc48..3bd430d03d5ac 100644 --- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc @@ -215,7 +215,7 @@ the specified time zone. "by_day": { "buckets": [ { - "key_as_string": "2015-09-30T23:00:00.000-01:00", + "key_as_string": "2015-09-30T23:00:00.000-01:00", <1> "key": 1443657600000, "doc_count": 1 }, From 974499092c8957045541d7e91f515b5f15e1ad1d Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Mon, 16 Jul 2018 13:39:08 -0400 Subject: [PATCH 11/13] Revert "fix typo" This reverts commit 414ac81154c0f7831150d704b5c3db2533694d99. --- .../aggregations/bucket/autodatehistogram-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc index 3bd430d03d5ac..28cb65ce6cc48 100644 --- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc @@ -215,7 +215,7 @@ the specified time zone. "by_day": { "buckets": [ { - "key_as_string": "2015-09-30T23:00:00.000-01:00", <1> + "key_as_string": "2015-09-30T23:00:00.000-01:00", "key": 1443657600000, "doc_count": 1 }, From fe7fd093bc4b8d1369a6e84b352c304f5313271d Mon Sep 17 00:00:00 2001 From: Paul Sanwald Date: Mon, 16 Jul 2018 13:41:20 -0400 Subject: [PATCH 12/13] Revert "Adds a new auto-interval date histogram (#28993)" This reverts commit d88d76483c970592a141512383e1a6a9d80d0e1a. 
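The two one-line documentation patches above add and then remove a callout on the `key_as_string` of the time-zone example. The underlying point of that example is that a bucket `key` is always the UTC epoch-millisecond timestamp, while `key_as_string` renders the same instant in the requested `time_zone`. A minimal Joda-Time sketch (not part of any patch in this series; the class name is invented for illustration) showing why the key `1443657600000` from those hunks prints as `2015-09-30T23:00:00.000-01:00`:

----
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

public class BucketKeyTimeZoneExample {
    public static void main(String[] args) {
        long key = 1443657600000L; // bucket key taken from the documentation hunk above

        // The key itself is the UTC timestamp: midnight, 1 October 2015.
        System.out.println(new DateTime(key, DateTimeZone.UTC));
        // -> 2015-10-01T00:00:00.000Z

        // key_as_string renders the same instant in the requested time_zone (-01:00),
        // which is why it appears as 11pm on 30 September 2015.
        System.out.println(new DateTime(key, DateTimeZone.forOffsetHours(-1)));
        // -> 2015-09-30T23:00:00.000-01:00
    }
}
----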
--- .../client/RestHighLevelClient.java | 3 - docs/reference/aggregations/bucket.asciidoc | 2 - .../autodatehistogram-aggregation.asciidoc | 283 ---- .../elasticsearch/search/SearchModule.java | 4 - .../bucket/BucketsAggregator.java | 13 - .../MergingBucketsDeferringCollector.java | 236 --- .../AutoDateHistogramAggregationBuilder.java | 218 --- .../AutoDateHistogramAggregator.java | 199 --- .../AutoDateHistogramAggregatorFactory.java | 72 - .../histogram/DateHistogramAggregator.java | 4 +- .../histogram/InternalAutoDateHistogram.java | 601 -------- .../histogram/InternalDateHistogram.java | 2 +- .../histogram/ParsedAutoDateHistogram.java | 91 -- .../aggregations/AggregationsTests.java | 2 - .../bucket/AutoDateHistogramTests.java | 44 - .../AutoDateHistogramAggregatorTests.java | 1332 ----------------- .../InternalAutoDateHistogramTests.java | 154 -- .../aggregations/AggregatorTestCase.java | 3 +- .../test/InternalAggregationTestCase.java | 3 - ...nternalMultiBucketAggregationTestCase.java | 3 +- 20 files changed, 6 insertions(+), 3263 deletions(-) delete mode 100644 docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java delete mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java delete mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java delete mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java delete mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index b9367303f7fa4..736a49879188b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -85,10 +85,8 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram; import 
org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder; @@ -1349,7 +1347,6 @@ static List getDefaultNamedXContents() { map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c)); map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c)); map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c)); - map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c)); map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c)); map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c)); map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c)); diff --git a/docs/reference/aggregations/bucket.asciidoc b/docs/reference/aggregations/bucket.asciidoc index e4b17ea293232..3c8f3599981f9 100644 --- a/docs/reference/aggregations/bucket.asciidoc +++ b/docs/reference/aggregations/bucket.asciidoc @@ -19,8 +19,6 @@ setting named `search.max_buckets`. It is disabled by default (-1) but requests include::bucket/adjacency-matrix-aggregation.asciidoc[] -include::bucket/autodatehistogram-aggregation.asciidoc[] - include::bucket/children-aggregation.asciidoc[] include::bucket/composite-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc deleted file mode 100644 index 28cb65ce6cc48..0000000000000 --- a/docs/reference/aggregations/bucket/autodatehistogram-aggregation.asciidoc +++ /dev/null @@ -1,283 +0,0 @@ -[[search-aggregations-bucket-autodatehistogram-aggregation]] -=== Auto-interval Date Histogram Aggregation - -A multi-bucket aggregation similar to the <> except -instead of providing an interval to use as the width of each bucket, a target number of buckets is provided -indicating the number of buckets needed and the interval of the buckets is automatically chosen to best achieve -that target. The number of buckets returned will always be less than or equal to this target number. - -The buckets field is optional, and will default to 10 buckets if not specified. - -Requesting a target of 10 buckets. - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "sales_over_time" : { - "auto_date_histogram" : { - "field" : "date", - "buckets" : 10 - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -==== Keys - -Internally, a date is represented as a 64 bit number representing a timestamp -in milliseconds-since-the-epoch. These timestamps are returned as the bucket -++key++s. The `key_as_string` is the same timestamp converted to a formatted -date string using the format specified with the `format` parameter: - -TIP: If no `format` is specified, then it will use the first date -<> specified in the field mapping. - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "sales_over_time" : { - "auto_date_histogram" : { - "field" : "date", - "buckets" : 5, - "format" : "yyyy-MM-dd" <1> - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -<1> Supports expressive date <> - -Response: - -[source,js] --------------------------------------------------- -{ - ... 
- "aggregations": { - "sales_over_time": { - "buckets": [ - { - "key_as_string": "2015-01-01", - "key": 1420070400000, - "doc_count": 3 - }, - { - "key_as_string": "2015-02-01", - "key": 1422748800000, - "doc_count": 2 - }, - { - "key_as_string": "2015-03-01", - "key": 1425168000000, - "doc_count": 2 - } - ] - } - } -} --------------------------------------------------- -// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] - -=== Intervals - -The interval of the returned buckets is selected based on the data collected by the -aggregation so that the number of buckets returned is less than or equal to the number -requested. The possible intervals returned are: - -[horizontal] -seconds:: In multiples of 1, 5, 10 and 30 -minutes:: In multiples of 1, 5, 10 and 30 -hours:: In multiples of 1, 3 and 12 -days:: In multiples of 1, and 7 -months:: In multiples of 1, and 3 -years:: In multiples of 1, 5, 10, 20, 50 and 100 - -In the worst case, where the number of daily buckets are too many for the requested -number of buckets, the number of buckets returned will be 1/7th of the number of -buckets requested. - -==== Time Zone - -Date-times are stored in Elasticsearch in UTC. By default, all bucketing and -rounding is also done in UTC. The `time_zone` parameter can be used to indicate -that bucketing should use a different time zone. - -Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or -`-08:00`) or as a timezone id, an identifier used in the TZ database like -`America/Los_Angeles`. - -Consider the following example: - -[source,js] ---------------------------------- -PUT my_index/log/1?refresh -{ - "date": "2015-10-01T00:30:00Z" -} - -PUT my_index/log/2?refresh -{ - "date": "2015-10-01T01:30:00Z" -} - -PUT my_index/log/3?refresh -{ - "date": "2015-10-01T02:30:00Z" -} - -GET my_index/_search?size=0 -{ - "aggs": { - "by_day": { - "auto_date_histogram": { - "field": "date", - "buckets" : 3 - } - } - } -} ---------------------------------- -// CONSOLE - -UTC is used if no time zone is specified, three 1-hour buckets are returned -starting at midnight UTC on 1 October 2015: - -[source,js] ---------------------------------- -{ - ... - "aggregations": { - "by_day": { - "buckets": [ - { - "key_as_string": "2015-10-01T00:00:00.000Z", - "key": 1443657600000, - "doc_count": 1 - }, - { - "key_as_string": "2015-10-01T01:00:00.000Z", - "key": 1443661200000, - "doc_count": 1 - }, - { - "key_as_string": "2015-10-01T02:00:00.000Z", - "key": 1443664800000, - "doc_count": 1 - } - ] - } - } -} ---------------------------------- -// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] - -If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before -midnight UTC: - -[source,js] ---------------------------------- -GET my_index/_search?size=0 -{ - "aggs": { - "by_day": { - "auto_date_histogram": { - "field": "date", - "buckets" : 3, - "time_zone": "-01:00" - } - } - } -} ---------------------------------- -// CONSOLE -// TEST[continued] - - -Now three 1-hour buckets are still returned but the first bucket starts at -11:00pm on 30 September 2015 since that is the local time for the bucket in -the specified time zone. - -[source,js] ---------------------------------- -{ - ... 
- "aggregations": { - "by_day": { - "buckets": [ - { - "key_as_string": "2015-09-30T23:00:00.000-01:00", - "key": 1443657600000, - "doc_count": 1 - }, - { - "key_as_string": "2015-10-01T00:00:00.000-01:00", - "key": 1443661200000, - "doc_count": 1 - }, - { - "key_as_string": "2015-10-01T01:00:00.000-01:00", - "key": 1443664800000, - "doc_count": 1 - } - ] - } - } -} ---------------------------------- -// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] - -<1> The `key_as_string` value represents midnight on each day - in the specified time zone. - -WARNING: When using time zones that follow DST (daylight savings time) changes, -buckets close to the moment when those changes happen can have slightly different -sizes than neighbouring buckets. -For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am, -clocks were turned forward 1 hour to 3am local time. If the result of the aggregation -was daily buckets, the bucket covering that day will only hold data for 23 hours -instead of the usual 24 hours for other buckets. The same is true for shorter intervals -like e.g. 12h. Here, we will have only a 11h bucket on the morning of 27 March when the -DST shift happens. - -==== Scripts - -Like with the normal <>, both document level -scripts and value level scripts are supported. This aggregation does not however, support the `min_doc_count`, -`extended_bounds` and `order` parameters. - -==== Missing value - -The `missing` parameter defines how documents that are missing a value should be treated. -By default they will be ignored but it is also possible to treat them as if they -had a value. - -[source,js] --------------------------------------------------- -POST /sales/_search?size=0 -{ - "aggs" : { - "sale_date" : { - "auto_date_histogram" : { - "field" : "date", - "buckets": 10, - "missing": "2000/01/01" <1> - } - } - } -} --------------------------------------------------- -// CONSOLE -// TEST[setup:sales] - -<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. 
- diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index b00d91f260b8d..8188b69d6c046 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -109,10 +109,8 @@ import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing; @@ -398,8 +396,6 @@ private void registerAggregations(List plugins) { HistogramAggregationBuilder::parse).addResultReader(InternalHistogram::new)); registerAggregation(new AggregationSpec(DateHistogramAggregationBuilder.NAME, DateHistogramAggregationBuilder::new, DateHistogramAggregationBuilder::parse).addResultReader(InternalDateHistogram::new)); - registerAggregation(new AggregationSpec(AutoDateHistogramAggregationBuilder.NAME, AutoDateHistogramAggregationBuilder::new, - AutoDateHistogramAggregationBuilder::parse).addResultReader(InternalAutoDateHistogram::new)); registerAggregation(new AggregationSpec(GeoDistanceAggregationBuilder.NAME, GeoDistanceAggregationBuilder::new, GeoDistanceAggregationBuilder::parse).addResultReader(InternalGeoDistance::new)); registerAggregation(new AggregationSpec(GeoGridAggregationBuilder.NAME, GeoGridAggregationBuilder::new, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index 7b09ac9d61895..504758e7a4ec2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -84,19 +84,6 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do subCollector.collect(doc, bucketOrd); } - public final void mergeBuckets(long[] mergeMap, long newNumBuckets) { - try (IntArray oldDocCounts = docCounts) { - docCounts = bigArrays.newIntArray(newNumBuckets, true); - docCounts.fill(0, newNumBuckets, 0); - for (int i = 0; i < oldDocCounts.size(); i++) { - int docCount = oldDocCounts.get(i); - if (docCount != 0) { - docCounts.increment(mergeMap[i], docCount); - } - } - } - } - public IntArray getDocCounts() { return docCounts; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java deleted file mode 100644 index f357e9d286f54..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/MergingBucketsDeferringCollector.java +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Licensed to Elasticsearch under one 
or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.bucket; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.Scorer; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.packed.PackedInts; -import org.apache.lucene.util.packed.PackedLongValues; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.LongHash; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.BucketCollector; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -/** - * A specialization of {@link DeferringBucketCollector} that collects all - * matches and then is able to replay a given subset of buckets. Exposes - * mergeBuckets, which can be invoked by the aggregator when increasing the - * rounding interval. 
- */ -public class MergingBucketsDeferringCollector extends DeferringBucketCollector { - - List entries = new ArrayList<>(); - BucketCollector collector; - final SearchContext searchContext; - LeafReaderContext context; - PackedLongValues.Builder docDeltas; - PackedLongValues.Builder buckets; - long maxBucket = -1; - boolean finished = false; - LongHash selectedBuckets; - - public MergingBucketsDeferringCollector(SearchContext context) { - this.searchContext = context; - } - - @Override - public void setDeferredCollector(Iterable deferredCollectors) { - this.collector = BucketCollector.wrap(deferredCollectors); - } - - @Override - public boolean needsScores() { - if (collector == null) { - throw new IllegalStateException(); - } - return collector.needsScores(); - } - - @Override - public void preCollection() throws IOException { - collector.preCollection(); - } - - private void finishLeaf() { - if (context != null) { - entries.add(new Entry(context, docDeltas.build(), buckets.build())); - } - context = null; - docDeltas = null; - buckets = null; - } - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - finishLeaf(); - - context = ctx; - docDeltas = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - buckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - - return new LeafBucketCollector() { - int lastDoc = 0; - - @Override - public void collect(int doc, long bucket) { - docDeltas.add(doc - lastDoc); - buckets.add(bucket); - lastDoc = doc; - maxBucket = Math.max(maxBucket, bucket); - } - }; - } - - public void mergeBuckets(long[] mergeMap) { - - List newEntries = new ArrayList<>(entries.size()); - for (Entry sourceEntry : entries) { - PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - for (PackedLongValues.Iterator itr = sourceEntry.buckets.iterator(); itr.hasNext();) { - long bucket = itr.next(); - newBuckets.add(mergeMap[Math.toIntExact(bucket)]); - } - newEntries.add(new Entry(sourceEntry.context, sourceEntry.docDeltas, newBuckets.build())); - } - entries = newEntries; - - // if there are buckets that have been collected in the current segment - // we need to update the bucket ordinals there too - if (buckets.size() > 0) { - PackedLongValues currentBuckets = buckets.build(); - PackedLongValues.Builder newBuckets = PackedLongValues.packedBuilder(PackedInts.DEFAULT); - for (PackedLongValues.Iterator itr = currentBuckets.iterator(); itr.hasNext();) { - long bucket = itr.next(); - newBuckets.add(mergeMap[Math.toIntExact(bucket)]); - } - buckets = newBuckets; - } - } - - @Override - public void postCollection() { - finishLeaf(); - finished = true; - } - - /** - * Replay the wrapped collector, but only on a selection of buckets. - */ - @Override - public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { - if (finished == false) { - throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); - } - if (this.selectedBuckets != null) { - throw new IllegalStateException("Already been replayed"); - } - - final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); - for (long bucket : selectedBuckets) { - hash.add(bucket); - } - this.selectedBuckets = hash; - - boolean needsScores = collector.needsScores(); - Weight weight = null; - if (needsScores) { - weight = searchContext.searcher().createNormalizedWeight(searchContext.query(), true); - } - for (Entry entry : entries) { - final LeafBucketCollector leafCollector = collector.getLeafCollector(entry.context); - DocIdSetIterator docIt = null; - if (needsScores && entry.docDeltas.size() > 0) { - Scorer scorer = weight.scorer(entry.context); - // We don't need to check if the scorer is null - // since we are sure that there are documents to replay - // (entry.docDeltas it not empty). - docIt = scorer.iterator(); - leafCollector.setScorer(scorer); - } - final PackedLongValues.Iterator docDeltaIterator = entry.docDeltas.iterator(); - final PackedLongValues.Iterator buckets = entry.buckets.iterator(); - int doc = 0; - for (long i = 0, end = entry.docDeltas.size(); i < end; ++i) { - doc += docDeltaIterator.next(); - final long bucket = buckets.next(); - final long rebasedBucket = hash.find(bucket); - if (rebasedBucket != -1) { - if (needsScores) { - if (docIt.docID() < doc) { - docIt.advance(doc); - } - // aggregations should only be replayed on matching - // documents - assert docIt.docID() == doc; - } - leafCollector.collect(doc, rebasedBucket); - } - } - } - - collector.postCollection(); - } - - /** - * Wrap the provided aggregator so that it behaves (almost) as if it had - * been collected directly. - */ - @Override - public Aggregator wrap(final Aggregator in) { - - return new WrappedAggregator(in) { - - @Override - public InternalAggregation buildAggregation(long bucket) throws IOException { - if (selectedBuckets == null) { - throw new IllegalStateException("Collection has not been replayed yet."); - } - final long rebasedBucket = selectedBuckets.find(bucket); - if (rebasedBucket == -1) { - throw new IllegalStateException("Cannot build for a bucket which has not been collected [" + bucket + "]"); - } - return in.buildAggregation(rebasedBucket); - } - - }; - } - - private static class Entry { - final LeafReaderContext context; - final PackedLongValues docDeltas; - final PackedLongValues buckets; - - Entry(LeafReaderContext context, PackedLongValues docDeltas, PackedLongValues buckets) { - this.context = context; - this.docDeltas = docDeltas; - this.buckets = buckets; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java deleted file mode 100644 index 366060835d891..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; -import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; -import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; -import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; -import java.util.Objects; - -public class AutoDateHistogramAggregationBuilder - extends ValuesSourceAggregationBuilder { - - public static final String NAME = "auto_date_histogram"; - - public static final ParseField NUM_BUCKETS_FIELD = new ParseField("buckets"); - - private static final ObjectParser PARSER; - static { - PARSER = new ObjectParser<>(AutoDateHistogramAggregationBuilder.NAME); - ValuesSourceParserHelper.declareNumericFields(PARSER, true, true, true); - - PARSER.declareInt(AutoDateHistogramAggregationBuilder::setNumBuckets, NUM_BUCKETS_FIELD); - } - - public static AutoDateHistogramAggregationBuilder parse(String aggregationName, XContentParser parser) throws IOException { - return PARSER.parse(parser, new AutoDateHistogramAggregationBuilder(aggregationName), null); - } - - private int numBuckets = 10; - - /** Create a new builder with the given name. */ - public AutoDateHistogramAggregationBuilder(String name) { - super(name, ValuesSourceType.NUMERIC, ValueType.DATE); - } - - /** Read from a stream, for internal use only. 
*/ - public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { - super(in, ValuesSourceType.NUMERIC, ValueType.DATE); - numBuckets = in.readVInt(); - } - - protected AutoDateHistogramAggregationBuilder(AutoDateHistogramAggregationBuilder clone, Builder factoriesBuilder, - Map metaData) { - super(clone, factoriesBuilder, metaData); - this.numBuckets = clone.numBuckets; - } - - @Override - protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map metaData) { - return new AutoDateHistogramAggregationBuilder(this, factoriesBuilder, metaData); - } - - @Override - protected void innerWriteTo(StreamOutput out) throws IOException { - out.writeVInt(numBuckets); - } - - @Override - public String getType() { - return NAME; - } - - public AutoDateHistogramAggregationBuilder setNumBuckets(int numBuckets) { - if (numBuckets <= 0) { - throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName() + " must be greater than 0 for [" + name + "]"); - } - this.numBuckets = numBuckets; - return this; - } - - public int getNumBuckets() { - return numBuckets; - } - - @Override - protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, - AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - RoundingInfo[] roundings = new RoundingInfo[6]; - roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE), 1000L, 1, 5, 10, 30); - roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR), 60 * 1000L, 1, 5, 10, 30); - roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY), 60 * 60 * 1000L, 1, 3, 12); - roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH), 24 * 60 * 60 * 1000L, 1, 7); - roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR), 30 * 24 * 60 * 60 * 1000L, 1, 3); - roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY), 365 * 24 * 60 * 60 * 1000L, 1, 5, 10, 20, 50, 100); - - int maxRoundingInterval = Arrays.stream(roundings,0, roundings.length-1) - .map(rounding -> rounding.innerIntervals) - .flatMapToInt(Arrays::stream) - .boxed() - .reduce(Integer::max).get(); - Settings settings = context.getQueryShardContext().getIndexSettings().getNodeSettings(); - int maxBuckets = MultiBucketConsumerService.MAX_BUCKET_SETTING.get(settings); - int bucketCeiling = maxBuckets / maxRoundingInterval; - if (numBuckets > bucketCeiling) { - throw new IllegalArgumentException(NUM_BUCKETS_FIELD.getPreferredName()+ - " must be less than " + bucketCeiling); - } - return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); - } - - private Rounding createRounding(DateTimeUnit interval) { - Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); - if (timeZone() != null) { - tzRoundingBuilder.timeZone(timeZone()); - } - Rounding rounding = tzRoundingBuilder.build(); - return rounding; - } - - @Override - protected XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.field(NUM_BUCKETS_FIELD.getPreferredName(), numBuckets); - return builder; - } - - @Override - protected int innerHashCode() { - return Objects.hash(numBuckets); - } - - @Override - protected boolean innerEquals(Object obj) { - AutoDateHistogramAggregationBuilder other = (AutoDateHistogramAggregationBuilder) obj; - return Objects.equals(numBuckets, other.numBuckets); - } - - public static class RoundingInfo implements 
Writeable { - final Rounding rounding; - final int[] innerIntervals; - final long roughEstimateDurationMillis; - - public RoundingInfo(Rounding rounding, long roughEstimateDurationMillis, int... innerIntervals) { - this.rounding = rounding; - this.roughEstimateDurationMillis = roughEstimateDurationMillis; - this.innerIntervals = innerIntervals; - } - - public RoundingInfo(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); - roughEstimateDurationMillis = in.readVLong(); - innerIntervals = in.readIntArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); - out.writeVLong(roughEstimateDurationMillis); - out.writeIntArray(innerIntervals); - } - - public int getMaximumInnerInterval() { - return innerIntervals[innerIntervals.length - 1]; - } - - public long getRoughEstimateDurationMillis() { - return roughEstimateDurationMillis; - } - - @Override - public int hashCode() { - return Objects.hash(rounding, Arrays.hashCode(innerIntervals)); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj.getClass() != getClass()) { - return false; - } - RoundingInfo other = (RoundingInfo) obj; - return Objects.equals(rounding, other.rounding) && - Objects.deepEquals(innerIntervals, other.innerIntervals); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java deleted file mode 100644 index f86145386f1df..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.util.LongHash; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.BucketOrder; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.LeafBucketCollector; -import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; -import org.elasticsearch.search.aggregations.bucket.DeferableBucketAggregator; -import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; -import org.elasticsearch.search.aggregations.bucket.MergingBucketsDeferringCollector; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; - -/** - * An aggregator for date values. Every date is rounded down using a configured - * {@link Rounding}. - * - * @see Rounding - */ -class AutoDateHistogramAggregator extends DeferableBucketAggregator { - - private final ValuesSource.Numeric valuesSource; - private final DocValueFormat formatter; - private final RoundingInfo[] roundingInfos; - private int roundingIdx = 0; - - private LongHash bucketOrds; - private int targetBuckets; - private MergingBucketsDeferringCollector deferringCollector; - - AutoDateHistogramAggregator(String name, AggregatorFactories factories, int numBuckets, RoundingInfo[] roundingInfos, - @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent, - List pipelineAggregators, Map metaData) throws IOException { - - super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); - this.targetBuckets = numBuckets; - this.valuesSource = valuesSource; - this.formatter = formatter; - this.roundingInfos = roundingInfos; - - bucketOrds = new LongHash(1, aggregationContext.bigArrays()); - - } - - @Override - public boolean needsScores() { - return (valuesSource != null && valuesSource.needsScores()) || super.needsScores(); - } - - @Override - protected boolean shouldDefer(Aggregator aggregator) { - return true; - } - - @Override - public DeferringBucketCollector getDeferringCollector() { - deferringCollector = new MergingBucketsDeferringCollector(context); - return deferringCollector; - } - - @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, - final LeafBucketCollector sub) throws IOException { - if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; - } - final SortedNumericDocValues values = valuesSource.longValues(ctx); - return new LeafBucketCollectorBase(sub, values) { - @Override - public void collect(int doc, long bucket) throws IOException { - assert bucket == 0; - if (values.advanceExact(doc)) { - final 
int valuesCount = values.docValueCount(); - - long previousRounded = Long.MIN_VALUE; - for (int i = 0; i < valuesCount; ++i) { - long value = values.nextValue(); - long rounded = roundingInfos[roundingIdx].rounding.round(value); - assert rounded >= previousRounded; - if (rounded == previousRounded) { - continue; - } - long bucketOrd = bucketOrds.add(rounded); - if (bucketOrd < 0) { // already seen - bucketOrd = -1 - bucketOrd; - collectExistingBucket(sub, doc, bucketOrd); - } else { - collectBucket(sub, doc, bucketOrd); - while (roundingIdx < roundingInfos.length - 1 - && bucketOrds.size() > (targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval())) { - increaseRounding(); - } - } - previousRounded = rounded; - } - } - } - - private void increaseRounding() { - try (LongHash oldBucketOrds = bucketOrds) { - LongHash newBucketOrds = new LongHash(1, context.bigArrays()); - long[] mergeMap = new long[(int) oldBucketOrds.size()]; - Rounding newRounding = roundingInfos[++roundingIdx].rounding; - for (int i = 0; i < oldBucketOrds.size(); i++) { - long oldKey = oldBucketOrds.get(i); - long newKey = newRounding.round(oldKey); - long newBucketOrd = newBucketOrds.add(newKey); - if (newBucketOrd >= 0) { - mergeMap[i] = newBucketOrd; - } else { - mergeMap[i] = -1 - newBucketOrd; - } - } - mergeBuckets(mergeMap, newBucketOrds.size()); - if (deferringCollector != null) { - deferringCollector.mergeBuckets(mergeMap); - } - bucketOrds = newBucketOrds; - } - } - }; - } - - @Override - public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - assert owningBucketOrdinal == 0; - consumeBucketsAndMaybeBreak((int) bucketOrds.size()); - - long[] bucketOrdArray = new long[(int) bucketOrds.size()]; - for (int i = 0; i < bucketOrds.size(); i++) { - bucketOrdArray[i] = i; - } - - runDeferredCollections(bucketOrdArray); - - List buckets = new ArrayList<>((int) bucketOrds.size()); - for (long i = 0; i < bucketOrds.size(); i++) { - buckets.add(new InternalAutoDateHistogram.Bucket(bucketOrds.get(i), bucketDocCount(i), formatter, bucketAggregations(i))); - } - - // the contract of the histogram aggregation is that shards must return - // buckets ordered by key in ascending order - CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); - - // value source will be null for unmapped fields - InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx, - buildEmptySubAggregations()); - - return new InternalAutoDateHistogram(name, buckets, targetBuckets, emptyBucketInfo, formatter, pipelineAggregators(), metaData()); - } - - @Override - public InternalAggregation buildEmptyAggregation() { - InternalAutoDateHistogram.BucketInfo emptyBucketInfo = new InternalAutoDateHistogram.BucketInfo(roundingInfos, roundingIdx, - buildEmptySubAggregations()); - return new InternalAutoDateHistogram(name, Collections.emptyList(), targetBuckets, emptyBucketInfo, formatter, - pipelineAggregators(), metaData()); - } - - @Override - public void doClose() { - Releasables.close(bucketOrds); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java deleted file mode 100644 index 051f2f9f6e7c7..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorFactory.java +++ 
/dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.search.aggregations.Aggregator; -import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.AggregatorFactory; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.search.aggregations.support.ValuesSource; -import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; -import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; -import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.List; -import java.util.Map; - -public final class AutoDateHistogramAggregatorFactory - extends ValuesSourceAggregatorFactory { - - private final int numBuckets; - private RoundingInfo[] roundingInfos; - - public AutoDateHistogramAggregatorFactory(String name, ValuesSourceConfig config, int numBuckets, RoundingInfo[] roundingInfos, - SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, - Map metaData) throws IOException { - super(name, config, context, parent, subFactoriesBuilder, metaData); - this.numBuckets = numBuckets; - this.roundingInfos = roundingInfos; - } - - @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggregator parent, boolean collectsFromSingleBucket, - List pipelineAggregators, Map metaData) throws IOException { - if (collectsFromSingleBucket == false) { - return asMultiBucketAggregator(this, context, parent); - } - return createAggregator(valuesSource, parent, pipelineAggregators, metaData); - } - - private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, - Map metaData) throws IOException { - return new AutoDateHistogramAggregator(name, factories, numBuckets, roundingInfos, valuesSource, config.format(), context, parent, - pipelineAggregators, - metaData); - } - - @Override - protected Aggregator createUnmapped(Aggregator parent, List pipelineAggregators, Map metaData) - throws IOException { - return createAggregator(null, parent, pipelineAggregators, metaData); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 1114d30fef31e..94dc18eae63e2 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -28,13 +28,13 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; -import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.bucket.BucketsAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; +import org.elasticsearch.search.aggregations.BucketOrder; +import org.elasticsearch.search.aggregations.InternalOrder; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java deleted file mode 100644 index 27c195cbdae75..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ /dev/null @@ -1,601 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.search.aggregations.InternalAggregation; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; -import org.elasticsearch.search.aggregations.KeyComparable; -import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.ListIterator; -import java.util.Map; -import java.util.Objects; - -/** - * Implementation of {@link Histogram}. - */ -public final class InternalAutoDateHistogram extends - InternalMultiBucketAggregation implements Histogram, HistogramFactory { - - public static class Bucket extends InternalMultiBucketAggregation.InternalBucket implements Histogram.Bucket, KeyComparable { - - final long key; - final long docCount; - final InternalAggregations aggregations; - protected final transient DocValueFormat format; - - public Bucket(long key, long docCount, DocValueFormat format, - InternalAggregations aggregations) { - this.format = format; - this.key = key; - this.docCount = docCount; - this.aggregations = aggregations; - } - - /** - * Read from a stream. 
- */ - public Bucket(StreamInput in, DocValueFormat format) throws IOException { - this.format = format; - key = in.readLong(); - docCount = in.readVLong(); - aggregations = InternalAggregations.readAggregations(in); - } - - @Override - public boolean equals(Object obj) { - if (obj == null || obj.getClass() != InternalAutoDateHistogram.Bucket.class) { - return false; - } - InternalAutoDateHistogram.Bucket that = (InternalAutoDateHistogram.Bucket) obj; - // No need to take the keyed and format parameters into account, - // they are already stored and tested on the InternalDateHistogram object - return key == that.key - && docCount == that.docCount - && Objects.equals(aggregations, that.aggregations); - } - - @Override - public int hashCode() { - return Objects.hash(getClass(), key, docCount, aggregations); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeLong(key); - out.writeVLong(docCount); - aggregations.writeTo(out); - } - - @Override - public String getKeyAsString() { - return format.format(key).toString(); - } - - @Override - public Object getKey() { - return new DateTime(key, DateTimeZone.UTC); - } - - @Override - public long getDocCount() { - return docCount; - } - - @Override - public Aggregations getAggregations() { - return aggregations; - } - - Bucket reduce(List buckets, Rounding rounding, ReduceContext context) { - List aggregations = new ArrayList<>(buckets.size()); - long docCount = 0; - for (Bucket bucket : buckets) { - docCount += bucket.docCount; - aggregations.add((InternalAggregations) bucket.getAggregations()); - } - InternalAggregations aggs = InternalAggregations.reduce(aggregations, context); - return new InternalAutoDateHistogram.Bucket(rounding.round(key), docCount, format, aggs); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - String keyAsString = format.format(key).toString(); - builder.startObject(); - if (format != DocValueFormat.RAW) { - builder.field(CommonFields.KEY_AS_STRING.getPreferredName(), keyAsString); - } - builder.field(CommonFields.KEY.getPreferredName(), key); - builder.field(CommonFields.DOC_COUNT.getPreferredName(), docCount); - aggregations.toXContentInternal(builder, params); - builder.endObject(); - return builder; - } - - @Override - public int compareKey(Bucket other) { - return Long.compare(key, other.key); - } - - public DocValueFormat getFormatter() { - return format; - } - } - - static class BucketInfo { - - final RoundingInfo[] roundingInfos; - final int roundingIdx; - final InternalAggregations emptySubAggregations; - - BucketInfo(RoundingInfo[] roundings, int roundingIdx, InternalAggregations subAggregations) { - this.roundingInfos = roundings; - this.roundingIdx = roundingIdx; - this.emptySubAggregations = subAggregations; - } - - BucketInfo(StreamInput in) throws IOException { - int size = in.readVInt(); - roundingInfos = new RoundingInfo[size]; - for (int i = 0; i < size; i++) { - roundingInfos[i] = new RoundingInfo(in); - } - roundingIdx = in.readVInt(); - emptySubAggregations = InternalAggregations.readAggregations(in); - } - - void writeTo(StreamOutput out) throws IOException { - out.writeVInt(roundingInfos.length); - for (RoundingInfo roundingInfo : roundingInfos) { - roundingInfo.writeTo(out); - } - out.writeVInt(roundingIdx); - emptySubAggregations.writeTo(out); - } - - @Override - public boolean equals(Object obj) { - if (obj == null || getClass() != obj.getClass()) { - return false; - } - BucketInfo that = 
(BucketInfo) obj; - return Objects.deepEquals(roundingInfos, that.roundingInfos) - && Objects.equals(roundingIdx, that.roundingIdx) - && Objects.equals(emptySubAggregations, that.emptySubAggregations); - } - - @Override - public int hashCode() { - return Objects.hash(getClass(), Arrays.hashCode(roundingInfos), roundingIdx, emptySubAggregations); - } - } - - private final List buckets; - private final DocValueFormat format; - private final BucketInfo bucketInfo; - private final int targetBuckets; - - - InternalAutoDateHistogram(String name, List buckets, int targetBuckets, BucketInfo emptyBucketInfo, DocValueFormat formatter, - List pipelineAggregators, Map metaData) { - super(name, pipelineAggregators, metaData); - this.buckets = buckets; - this.bucketInfo = emptyBucketInfo; - this.format = formatter; - this.targetBuckets = targetBuckets; - } - - /** - * Stream from a stream. - */ - public InternalAutoDateHistogram(StreamInput in) throws IOException { - super(in); - bucketInfo = new BucketInfo(in); - format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readList(stream -> new Bucket(stream, format)); - this.targetBuckets = in.readVInt(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - bucketInfo.writeTo(out); - out.writeNamedWriteable(format); - out.writeList(buckets); - out.writeVInt(targetBuckets); - } - - @Override - public String getWriteableName() { - return AutoDateHistogramAggregationBuilder.NAME; - } - - @Override - public List getBuckets() { - return Collections.unmodifiableList(buckets); - } - - DocValueFormat getFormatter() { - return format; - } - - public int getTargetBuckets() { - return targetBuckets; - } - - public BucketInfo getBucketInfo() { - return bucketInfo; - } - - @Override - public InternalAutoDateHistogram create(List buckets) { - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators(), metaData); - } - - @Override - public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { - return new Bucket(prototype.key, prototype.docCount, prototype.format, aggregations); - } - - private static class IteratorAndCurrent { - - private final Iterator iterator; - private Bucket current; - - IteratorAndCurrent(Iterator iterator) { - this.iterator = iterator; - current = iterator.next(); - } - - } - - /** - * This method works almost exactly the same as - * InternalDateHistogram#reduceBuckets(List, ReduceContext), the different - * here is that we need to round all the keys we see using the highest level - * rounding returned across all the shards so the resolution of the buckets - * is the same and they can be reduced together. 
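 * For example, if one shard had reduced its buckets with the minute-level rounding while
 * another had already stepped up to the hour-level rounding, the hour rounding is chosen
 * here and every incoming bucket key is re-rounded to the hour before equal keys are merged.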
- */ - private BucketReduceResult reduceBuckets(List aggregations, ReduceContext reduceContext) { - - // First we need to find the highest level rounding used across all the - // shards - int reduceRoundingIdx = 0; - for (InternalAggregation aggregation : aggregations) { - int aggRoundingIdx = ((InternalAutoDateHistogram) aggregation).bucketInfo.roundingIdx; - if (aggRoundingIdx > reduceRoundingIdx) { - reduceRoundingIdx = aggRoundingIdx; - } - } - // This rounding will be used to reduce all the buckets - RoundingInfo reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx]; - Rounding reduceRounding = reduceRoundingInfo.rounding; - - final PriorityQueue pq = new PriorityQueue(aggregations.size()) { - @Override - protected boolean lessThan(IteratorAndCurrent a, IteratorAndCurrent b) { - return a.current.key < b.current.key; - } - }; - for (InternalAggregation aggregation : aggregations) { - InternalAutoDateHistogram histogram = (InternalAutoDateHistogram) aggregation; - if (histogram.buckets.isEmpty() == false) { - pq.add(new IteratorAndCurrent(histogram.buckets.iterator())); - } - } - - List reducedBuckets = new ArrayList<>(); - if (pq.size() > 0) { - // list of buckets coming from different shards that have the same key - List currentBuckets = new ArrayList<>(); - double key = reduceRounding.round(pq.top().current.key); - - do { - final IteratorAndCurrent top = pq.top(); - - if (reduceRounding.round(top.current.key) != key) { - // the key changes, reduce what we already buffered and reset the buffer for current buckets - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); - reduceContext.consumeBucketsAndMaybeBreak(1); - reducedBuckets.add(reduced); - currentBuckets.clear(); - key = reduceRounding.round(top.current.key); - } - - currentBuckets.add(top.current); - - if (top.iterator.hasNext()) { - final Bucket next = top.iterator.next(); - assert next.key > top.current.key : "shards must return data sorted by key"; - top.current = next; - pq.updateTop(); - } else { - pq.pop(); - } - } while (pq.size() > 0); - - if (currentBuckets.isEmpty() == false) { - final Bucket reduced = currentBuckets.get(0).reduce(currentBuckets, reduceRounding, reduceContext); - reduceContext.consumeBucketsAndMaybeBreak(1); - reducedBuckets.add(reduced); - } - } - - return mergeBucketsIfNeeded(reducedBuckets, reduceRoundingIdx, reduceRoundingInfo, reduceContext); - } - - private BucketReduceResult mergeBucketsIfNeeded(List reducedBuckets, int reduceRoundingIdx, RoundingInfo reduceRoundingInfo, - ReduceContext reduceContext) { - while (reducedBuckets.size() > (targetBuckets * reduceRoundingInfo.getMaximumInnerInterval()) - && reduceRoundingIdx < bucketInfo.roundingInfos.length - 1) { - reduceRoundingIdx++; - reduceRoundingInfo = bucketInfo.roundingInfos[reduceRoundingIdx]; - reducedBuckets = mergeBuckets(reducedBuckets, reduceRoundingInfo.rounding, reduceContext); - } - return new BucketReduceResult(reducedBuckets, reduceRoundingInfo, reduceRoundingIdx); - } - - private List mergeBuckets(List reducedBuckets, Rounding reduceRounding, ReduceContext reduceContext) { - List mergedBuckets = new ArrayList<>(); - - List sameKeyedBuckets = new ArrayList<>(); - double key = Double.NaN; - for (Bucket bucket : reducedBuckets) { - long roundedBucketKey = reduceRounding.round(bucket.key); - if (Double.isNaN(key)) { - key = roundedBucketKey; - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); - sameKeyedBuckets.add(createBucket(key, bucket.docCount, 
bucket.aggregations)); - } else if (roundedBucketKey == key) { - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); - } else { - reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); - sameKeyedBuckets.clear(); - key = roundedBucketKey; - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); - } - } - if (sameKeyedBuckets.isEmpty() == false) { - reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, reduceRounding, reduceContext)); - } - reducedBuckets = mergedBuckets; - return reducedBuckets; - } - - private static class BucketReduceResult { - List buckets; - RoundingInfo roundingInfo; - int roundingIdx; - - BucketReduceResult(List buckets, RoundingInfo roundingInfo, int roundingIdx) { - this.buckets = buckets; - this.roundingInfo = roundingInfo; - this.roundingIdx = roundingIdx; - - } - } - - private BucketReduceResult addEmptyBuckets(BucketReduceResult currentResult, ReduceContext reduceContext) { - List list = currentResult.buckets; - if (list.isEmpty()) { - return currentResult; - } - int roundingIdx = getAppropriateRounding(list.get(0).key, list.get(list.size() - 1).key, currentResult.roundingIdx, - bucketInfo.roundingInfos); - RoundingInfo roundingInfo = bucketInfo.roundingInfos[roundingIdx]; - Rounding rounding = roundingInfo.rounding; - // merge buckets using the new rounding - list = mergeBuckets(list, rounding, reduceContext); - - Bucket lastBucket = null; - ListIterator iter = list.listIterator(); - InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(bucketInfo.emptySubAggregations), - reduceContext); - - // Add the empty buckets within the data, - // e.g. 
if the data series is [1,2,3,7] there're 3 empty buckets that will be created for 4,5,6 - while (iter.hasNext()) { - Bucket nextBucket = list.get(iter.nextIndex()); - if (lastBucket != null) { - long key = rounding.nextRoundingValue(lastBucket.key); - while (key < nextBucket.key) { - reduceContext.consumeBucketsAndMaybeBreak(1); - iter.add(new InternalAutoDateHistogram.Bucket(key, 0, format, reducedEmptySubAggs)); - key = rounding.nextRoundingValue(key); - } - assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key; - } - lastBucket = iter.next(); - } - return new BucketReduceResult(list, roundingInfo, roundingIdx); - } - - private int getAppropriateRounding(long minKey, long maxKey, int roundingIdx, RoundingInfo[] roundings) { - if (roundingIdx == roundings.length - 1) { - return roundingIdx; - } - int currentRoundingIdx = roundingIdx; - - // Getting the accurate number of required buckets can be slow for large - // ranges at low roundings so get a rough estimate of the rounding first - // so we are at most 1 away from the correct rounding and then get the - // accurate rounding value - for (int i = currentRoundingIdx + 1; i < roundings.length; i++) { - long dataDuration = maxKey - minKey; - long roughEstimateRequiredBuckets = dataDuration / roundings[i].getRoughEstimateDurationMillis(); - if (roughEstimateRequiredBuckets < targetBuckets * roundings[i].getMaximumInnerInterval()) { - currentRoundingIdx = i - 1; - break; - } else if (i == roundingIdx - 1) { - currentRoundingIdx = i; - break; - } - } - - int requiredBuckets = 0; - do { - Rounding currentRounding = roundings[currentRoundingIdx].rounding; - long currentKey = minKey; - requiredBuckets = 0; - while (currentKey < maxKey) { - requiredBuckets++; - currentKey = currentRounding.nextRoundingValue(currentKey); - } - currentRoundingIdx++; - } while (requiredBuckets > (targetBuckets * roundings[roundingIdx].getMaximumInnerInterval()) - && currentRoundingIdx < roundings.length); - // The loop will increase past the correct rounding index here so we - // need to subtract one to get the rounding index we need - return currentRoundingIdx - 1; - } - - @Override - public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { - BucketReduceResult reducedBucketsResult = reduceBuckets(aggregations, reduceContext); - - if (reduceContext.isFinalReduce()) { - // adding empty buckets if needed - reducedBucketsResult = addEmptyBuckets(reducedBucketsResult, reduceContext); - - // Adding empty buckets may have tipped us over the target so merge the buckets again if needed - reducedBucketsResult = mergeBucketsIfNeeded(reducedBucketsResult.buckets, reducedBucketsResult.roundingIdx, - reducedBucketsResult.roundingInfo, reduceContext); - - // Now finally see if we need to merge consecutive buckets together to make a coarser interval at the same rounding - reducedBucketsResult = maybeMergeConsecutiveBuckets(reducedBucketsResult, reduceContext); - } - - BucketInfo bucketInfo = new BucketInfo(this.bucketInfo.roundingInfos, reducedBucketsResult.roundingIdx, - this.bucketInfo.emptySubAggregations); - - return new InternalAutoDateHistogram(getName(), reducedBucketsResult.buckets, targetBuckets, bucketInfo, format, - pipelineAggregators(), getMetaData()); - } - - private BucketReduceResult maybeMergeConsecutiveBuckets(BucketReduceResult reducedBucketsResult, ReduceContext reduceContext) { - List buckets = reducedBucketsResult.buckets; - RoundingInfo roundingInfo = reducedBucketsResult.roundingInfo; - int 
roundingIdx = reducedBucketsResult.roundingIdx; - if (buckets.size() > targetBuckets) { - for (int interval : roundingInfo.innerIntervals) { - int resultingBuckets = buckets.size() / interval; - if (resultingBuckets <= targetBuckets) { - return mergeConsecutiveBuckets(buckets, interval, roundingIdx, roundingInfo, reduceContext); - } - } - } - return reducedBucketsResult; - } - - private BucketReduceResult mergeConsecutiveBuckets(List reducedBuckets, int mergeInterval, int roundingIdx, - RoundingInfo roundingInfo, ReduceContext reduceContext) { - List mergedBuckets = new ArrayList<>(); - List sameKeyedBuckets = new ArrayList<>(); - - double key = roundingInfo.rounding.round(reducedBuckets.get(0).key); - for (int i = 0; i < reducedBuckets.size(); i++) { - Bucket bucket = reducedBuckets.get(i); - if (i % mergeInterval == 0 && sameKeyedBuckets.isEmpty() == false) { - reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); - sameKeyedBuckets.clear(); - key = roundingInfo.rounding.round(bucket.key); - } - reduceContext.consumeBucketsAndMaybeBreak(-countInnerBucket(bucket) - 1); - sameKeyedBuckets.add(createBucket(key, bucket.docCount, bucket.aggregations)); - } - if (sameKeyedBuckets.isEmpty() == false) { - reduceContext.consumeBucketsAndMaybeBreak(1); - mergedBuckets.add(sameKeyedBuckets.get(0).reduce(sameKeyedBuckets, roundingInfo.rounding, reduceContext)); - } - return new BucketReduceResult(mergedBuckets, roundingInfo, roundingIdx); - } - - @Override - public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { - builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (Bucket bucket : buckets) { - bucket.toXContent(builder, params); - } - builder.endArray(); - return builder; - } - - // HistogramFactory method impls - - @Override - public Number getKey(MultiBucketsAggregation.Bucket bucket) { - return ((Bucket) bucket).key; - } - - @Override - public Number nextKey(Number key) { - return bucketInfo.roundingInfos[bucketInfo.roundingIdx].rounding.nextRoundingValue(key.longValue()); - } - - @Override - public InternalAggregation createAggregation(List buckets) { - // convert buckets to the right type - List buckets2 = new ArrayList<>(buckets.size()); - for (Object b : buckets) { - buckets2.add((Bucket) b); - } - buckets2 = Collections.unmodifiableList(buckets2); - return new InternalAutoDateHistogram(name, buckets2, targetBuckets, bucketInfo, format, pipelineAggregators(), getMetaData()); - } - - @Override - public Bucket createBucket(Number key, long docCount, InternalAggregations aggregations) { - return new Bucket(key.longValue(), docCount, format, aggregations); - } - - @Override - protected boolean doEquals(Object obj) { - InternalAutoDateHistogram that = (InternalAutoDateHistogram) obj; - return Objects.equals(buckets, that.buckets) - && Objects.equals(format, that.format) - && Objects.equals(bucketInfo, that.bucketInfo); - } - - @Override - protected int doHashCode() { - return Objects.hash(buckets, format, bucketInfo); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 669bda5574d31..84dec2c983e28 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -424,7 +424,7 @@ private void addEmptyBuckets(List list, ReduceContext reduceContext) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); key = nextKey(key).longValue(); } - assert key == nextBucket.key : "key: " + key + ", nextBucket.key: " + nextBucket.key; + assert key == nextBucket.key; } lastBucket = iter.next(); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java deleted file mode 100644 index caca44f9f2ea7..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.common.xcontent.ObjectParser; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; - -import java.io.IOException; -import java.util.List; - -public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation implements Histogram { - - @Override - public String getType() { - return AutoDateHistogramAggregationBuilder.NAME; - } - - @Override - public List getBuckets() { - return buckets; - } - - private static ObjectParser PARSER = - new ObjectParser<>(ParsedAutoDateHistogram.class.getSimpleName(), true, ParsedAutoDateHistogram::new); - static { - declareMultiBucketAggregationFields(PARSER, - parser -> ParsedBucket.fromXContent(parser, false), - parser -> ParsedBucket.fromXContent(parser, true)); - } - - public static ParsedAutoDateHistogram fromXContent(XContentParser parser, String name) throws IOException { - ParsedAutoDateHistogram aggregation = PARSER.parse(parser, null); - aggregation.setName(name); - return aggregation; - } - - public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBucket implements Histogram.Bucket { - - private Long key; - - @Override - public Object getKey() { - if (key != null) { - return new DateTime(key, DateTimeZone.UTC); - } - return null; - } - - @Override - public String getKeyAsString() { - String keyAsString = super.getKeyAsString(); - if (keyAsString != null) { - return keyAsString; - } - if (key != null) { - return Long.toString(key); - } - return null; - } - - @Override - protected XContentBuilder keyToXContent(XContentBuilder builder) throws IOException { - 
return builder.field(CommonFields.KEY.getPreferredName(), key); - } - - static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException { - return parseXContent(parser, keyed, ParsedBucket::new, (p, bucket) -> bucket.key = p.longValue()); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java index e381240ca1f61..29d8e327d5cd7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationsTests.java @@ -37,7 +37,6 @@ import org.elasticsearch.search.aggregations.bucket.filter.InternalFiltersTests; import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGridTests; import org.elasticsearch.search.aggregations.bucket.global.InternalGlobalTests; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogramTests; import org.elasticsearch.search.aggregations.bucket.missing.InternalMissingTests; @@ -126,7 +125,6 @@ private static List getAggsTests() { aggsTests.add(new InternalGeoCentroidTests()); aggsTests.add(new InternalHistogramTests()); aggsTests.add(new InternalDateHistogramTests()); - aggsTests.add(new InternalAutoDateHistogramTests()); aggsTests.add(new LongTermsTests()); aggsTests.add(new DoubleTermsTests()); aggsTests.add(new StringTermsTests()); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java deleted file mode 100644 index 3a10edf183376..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.aggregations.bucket; - -import org.elasticsearch.search.aggregations.BaseAggregationTestCase; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder; - -public class AutoDateHistogramTests extends BaseAggregationTestCase { - - @Override - protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() { - AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(randomAlphaOfLengthBetween(1, 10)); - builder.field(INT_FIELD_NAME); - builder.setNumBuckets(randomIntBetween(1, 100000)); - if (randomBoolean()) { - builder.format("###.##"); - } - if (randomBoolean()) { - builder.missing(randomIntBetween(0, 10)); - } - if (randomBoolean()) { - builder.timeZone(randomDateTimeZone()); - } - return builder; - } - -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java deleted file mode 100644 index 7cf29e3aa9cc5..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ /dev/null @@ -1,1332 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
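 * A note on the helpers used throughout this test: judging from the expectations below,
 * testSearchCase runs the aggregator without a final reduce, so every distinct value shows
 * up as its own bucket under its raw key; testSearchAndReduceCase performs the full reduce,
 * so the automatically selected rounding and the inserted empty buckets become visible; and
 * testBothCases applies the same assertions to both variants.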
- */ - -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.apache.lucene.document.Document; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.SortedNumericDocValuesField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.store.Directory; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.mapper.DateFieldMapper; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.metrics.stats.Stats; -import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.hamcrest.Matchers.containsString; - -public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { - - private static final String DATE_FIELD = "date"; - private static final String INSTANT_FIELD = "instant"; - - private static final List dataset = Arrays.asList( - "2010-03-12T01:07:45", - "2010-04-27T03:43:34", - "2012-05-18T04:11:00", - "2013-05-29T05:11:31", - "2013-10-31T08:24:05", - "2015-02-13T13:09:32", - "2015-06-24T13:47:43", - "2015-11-13T16:14:34", - "2016-03-04T17:09:50", - "2017-12-12T22:55:46"); - - public void testMatchNoDocs() throws IOException { - testBothCases(new MatchNoDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) - ); - } - - public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, - aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> assertEquals(10, histogram.getBuckets().size()) - ); - testSearchAndReduceCase(query, dataset, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) - ); - } - - public void testSubAggregations() throws IOException { - Query query = new MatchAllDocsQuery(); - testSearchAndReduceCase(query, dataset, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) - .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2010-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - Stats stats = bucket.getAggregations().get("stats"); - assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); - assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); - assertEquals(2L, stats.getCount()); - - bucket = 
buckets.get(1); - assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertTrue(Double.isInfinite(stats.getMin())); - assertTrue(Double.isInfinite(stats.getMax())); - assertEquals(0L, stats.getCount()); - - bucket = buckets.get(2); - assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); - assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - - bucket = buckets.get(3); - assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); - assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); - assertEquals(2L, stats.getCount()); - - bucket = buckets.get(4); - assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertTrue(Double.isInfinite(stats.getMin())); - assertTrue(Double.isInfinite(stats.getMax())); - assertEquals(0L, stats.getCount()); - - bucket = buckets.get(5); - assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); - assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); - assertEquals(3L, stats.getCount()); - - bucket = buckets.get(6); - assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2016-03-04T17:09:50.000Z", stats.getMinAsString()); - assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - - bucket = buckets.get(7); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); - assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - }); - } - - public void testNoDocs() throws IOException { - Query query = new MatchNoDocsQuery(); - List dates = Collections.emptyList(); - Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); - - testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) - ); - testSearchAndReduceCase(query, dates, aggregation, - histogram -> assertNull(histogram) - ); - } - - public void testAggregateWrongField() throws IOException { - testBothCases(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) - ); - } - - public void testIntervalYear() throws IOException { - testSearchCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2015-02-13T13:09:32.000Z", 
bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2015-06-24T13:47:43.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2015-11-13T16:14:34.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2016-03-04T17:09:50.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-12-12T22:55:46.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - } - - public void testIntervalMonth() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(6, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-03-04T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-03-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-03-06T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } - ); - } - - public void testWithLargeNumberOfBuckets() { - Query query = new MatchAllDocsQuery(); - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> testSearchCase(query, dataset, - aggregation 
-> aggregation.setNumBuckets(MultiBucketConsumerService.DEFAULT_MAX_BUCKETS+1).field(DATE_FIELD), - // since an exception is thrown, this assertion won't be invoked. - histogram -> assertTrue(false) - )); - assertThat(exception.getMessage(), containsString("must be less than")); - } - - public void testIntervalDay() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(4, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01", - "2017-02-02", - "2017-02-02", - "2017-02-03", - "2017-02-03", - "2017-02-03", - "2017-02-05" - ), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - } - - public void testIntervalDayWithTZ() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(4, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-31T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-02T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-04T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> 
aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-31T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-02T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-03T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-04T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - } - - public void testIntervalHour() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:00:00.000Z", 
bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T10:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T11:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T13:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } - ); - } - - public void testIntervalHourWithTZ() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T08:02:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T08:35:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:15:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T12:06:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T13:04:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T13:05:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T14:59:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T15:06:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T15:48:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T15:59:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> 
aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T08:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T11:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T12:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T13:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T14:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T15:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } - ); - } - - public void testAllSecondIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusSeconds(i); - dataset.add(format.print(date)); - } - - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(20, buckets.size()); - for (int i = 0; i < 20; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 30), bucket.getKey()); - assertEquals(30, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram 
-> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - for (int i = 0; i < 10; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i), bucket.getKey()); - assertEquals(60, bucket.getDocCount()); - } - }); - } - - public void testAllMinuteIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusMinutes(i); - dataset.add(format.print(date)); - } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(20, buckets.size()); - for (int i = 0; i < 20; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 30), bucket.getKey()); - assertEquals(30, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - for (int i = 0; i < 10; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i), bucket.getKey()); - assertEquals(60, bucket.getDocCount()); - } - }); - } - - public void testAllHourIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusHours(i); - dataset.add(format.print(date)); - } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - 
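// The remaining cases in this method exercise inner-interval selection at the hour rounding:
// during reduce, consecutive buckets are merged using the first inner interval that brings the
// bucket count down to the requested number of buckets or fewer. A minimal sketch of that
// selection, assuming the hour rounding advertises inner intervals {1, 3, 12} as the
// expectations below suggest (hypothetical helper, not production code):
static int pickInnerInterval(int bucketCount, int targetBuckets, int[] innerIntervals) {
    for (int interval : innerIntervals) {
        if (bucketCount / interval <= targetBuckets) {
            // e.g. 600 hourly buckets with a target of 300 -> interval 3, i.e. 200 three-hour buckets
            return interval;
        }
    }
    // no inner interval is coarse enough; the aggregation falls through to the next rounding (days)
    return -1;
}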
testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(200, buckets.size()); - for (int i = 0; i < 200; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i * 3), bucket.getKey()); - assertEquals(3, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(50, buckets.size()); - for (int i = 0; i < 50; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i * 12), bucket.getKey()); - assertEquals(12, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(25, buckets.size()); - for (int i = 0; i < 25; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i), bucket.getKey()); - assertEquals(24, bucket.getDocCount()); - } - }); - } - - public void testAllDayIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 700; i++) { - DateTime date = startDate.plusDays(i); - dataset.add(format.print(date)); - } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(700).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(700, buckets.size()); - for (int i = 0; i < 700; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(100, buckets.size()); - for (int i = 0; i < 100; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i * 7), bucket.getKey()); - assertEquals(7, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(24, buckets.size()); - for (int i = 0; i < 24; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMonths(i), bucket.getKey()); - assertThat(bucket.getDocCount(), Matchers.lessThanOrEqualTo(31L)); - } - }); - } - - public void testAllMonthIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusMonths(i); - dataset.add(format.print(date)); - } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = 
buckets.get(i); - assertEquals(startDate.plusMonths(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(200, buckets.size()); - for (int i = 0; i < 200; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMonths(i * 3), bucket.getKey()); - assertEquals(3, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(60).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(50, buckets.size()); - for (int i = 0; i < 50; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i), bucket.getKey()); - assertEquals(12, bucket.getDocCount()); - } - }); - } - - public void testAllYearIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusYears(i); - dataset.add(format.print(date)); - } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(30, buckets.size()); - for (int i = 0; i < 30; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 20), bucket.getKey()); - assertEquals(20, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(20).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(12, buckets.size()); - for (int i = 0; i < 12; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 50), bucket.getKey()); - assertEquals(50, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> { - List buckets = 
histogram.getBuckets(); - assertEquals(6, buckets.size()); - for (int i = 0; i < 6; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 100), bucket.getKey()); - assertEquals(100, bucket.getDocCount()); - } - }); - } - - public void testInterval3Hour() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); - assertEquals(4, bucket.getDocCount()); - } - ); - } - - public void testIntervalMinute() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:35.000Z", - "2017-02-01T09:02:59.000Z", - "2017-02-01T09:15:37.000Z", - "2017-02-01T09:16:04.000Z", - "2017-02-01T09:16:42.000Z" - ), - aggregation -> 
aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:35.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:02:59.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:15:37.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T09:16:04.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T09:16:42.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:35.000Z", - "2017-02-01T09:02:59.000Z", - "2017-02-01T09:15:37.000Z", - "2017-02-01T09:16:04.000Z", - "2017-02-01T09:16:42.000Z" - ), - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(15, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:03:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:04:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T09:05:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T09:06:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T09:07:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T09:08:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T09:09:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T09:10:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T09:11:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(10); - assertEquals("2017-02-01T09:12:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(11); - assertEquals("2017-02-01T09:13:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(12); - assertEquals("2017-02-01T09:14:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(13); - assertEquals("2017-02-01T09:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(14); - assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - } - ); - } - - public void testIntervalSecond() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01T00:00:05.015Z", "2017-02-01T00:00:07.299Z", 
"2017-02-01T00:00:07.074Z", - "2017-02-01T00:00:11.688Z", "2017-02-01T00:00:11.210Z", "2017-02-01T00:00:11.380Z"), - aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T00:00:05.015Z", - "2017-02-01T00:00:07.299Z", - "2017-02-01T00:00:07.074Z", - "2017-02-01T00:00:11.688Z", - "2017-02-01T00:00:11.210Z", - "2017-02-01T00:00:11.380Z" - ), - aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(7, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:06.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T00:00:08.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T00:00:09.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } - ); - } - - private void testSearchCase(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - executeTestCase(false, query, dataset, configure, verify); - } - - private void testSearchAndReduceCase(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - executeTestCase(true, query, dataset, configure, verify); - } - - private void testBothCases(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - testSearchCase(query, dataset, configure, verify); - testSearchAndReduceCase(query, dataset, configure, verify); - } - - @Override - protected IndexSettings createIndexSettings() { - Settings nodeSettings = Settings.builder() - .put("search.max_buckets", 100000).build(); - return new IndexSettings( - IndexMetaData.builder("_index").settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0) - .creationDate(System.currentTimeMillis()) - .build(), - nodeSettings - ); - } - - private void executeTestCase(boolean reduced, Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - - try (Directory directory = newDirectory()) { - try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { - Document document = new Document(); - for (String date : dataset) { - if (frequently()) { - 
indexWriter.commit(); - } - - long instant = asLong(date); - document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); - document.add(new LongPoint(INSTANT_FIELD, instant)); - indexWriter.addDocument(document); - document.clear(); - } - } - - try (IndexReader indexReader = DirectoryReader.open(directory)) { - IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - - AutoDateHistogramAggregationBuilder aggregationBuilder = new AutoDateHistogramAggregationBuilder("_name"); - if (configure != null) { - configure.accept(aggregationBuilder); - } - - DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); - DateFieldMapper.DateFieldType fieldType = builder.fieldType(); - fieldType.setHasDocValues(true); - fieldType.setName(aggregationBuilder.field()); - - InternalAutoDateHistogram histogram; - if (reduced) { - histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType); - } else { - histogram = search(indexSearcher, query, aggregationBuilder, fieldType); - } - verify.accept(histogram); - } - } - } - - private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis(); - } -} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java deleted file mode 100644 index 389371efd79aa..0000000000000 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.aggregations.bucket.histogram; - -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; -import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTime; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TreeMap; - -import static org.elasticsearch.common.unit.TimeValue.timeValueHours; -import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; -import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; - -public class InternalAutoDateHistogramTests extends InternalMultiBucketAggregationTestCase { - - private DocValueFormat format; - private RoundingInfo[] roundingInfos; - - @Override - public void setUp() throws Exception { - super.setUp(); - format = randomNumericDocValueFormat(); - - roundingInfos = new RoundingInfo[6]; - roundingInfos[0] = new RoundingInfo(Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(), 1, 5, 10, 30); - roundingInfos[1] = new RoundingInfo(Rounding.builder(DateTimeUnit.MINUTES_OF_HOUR).build(), 1, 5, 10, 30); - roundingInfos[2] = new RoundingInfo(Rounding.builder(DateTimeUnit.HOUR_OF_DAY).build(), 1, 3, 12); - roundingInfos[3] = new RoundingInfo(Rounding.builder(DateTimeUnit.DAY_OF_MONTH).build(), 1, 7); - roundingInfos[4] = new RoundingInfo(Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(), 1, 3); - roundingInfos[5] = new RoundingInfo(Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).build(), 1, 10, 20, 50, 100); - } - - @Override - protected InternalAutoDateHistogram createTestInstance(String name, - List pipelineAggregators, - Map metaData, - InternalAggregations aggregations) { - int nbBuckets = randomNumberOfBuckets(); - int targetBuckets = randomIntBetween(1, nbBuckets * 2 + 1); - List buckets = new ArrayList<>(nbBuckets); - long startingDate = System.currentTimeMillis(); - - long interval = randomIntBetween(1, 3); - long intervalMillis = randomFrom(timeValueSeconds(interval), timeValueMinutes(interval), timeValueHours(interval)).getMillis(); - - for (int i = 0; i < nbBuckets; i++) { - long key = startingDate + (intervalMillis * i); - buckets.add(i, new InternalAutoDateHistogram.Bucket(key, randomIntBetween(1, 100), format, aggregations)); - } - InternalAggregations subAggregations = new InternalAggregations(Collections.emptyList()); - BucketInfo bucketInfo = new BucketInfo(roundingInfos, randomIntBetween(0, roundingInfos.length - 1), subAggregations); - - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); - } - - @Override - protected void assertReduced(InternalAutoDateHistogram reduced, List inputs) { - int roundingIdx = 0; - for (InternalAutoDateHistogram histogram : inputs) { - if (histogram.getBucketInfo().roundingIdx > roundingIdx) { - roundingIdx = histogram.getBucketInfo().roundingIdx; - } - } - Map 
expectedCounts = new TreeMap<>(); - for (Histogram histogram : inputs) { - for (Histogram.Bucket bucket : histogram.getBuckets()) { - expectedCounts.compute(roundingInfos[roundingIdx].rounding.round(((DateTime) bucket.getKey()).getMillis()), - (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); - } - } - Map actualCounts = new TreeMap<>(); - for (Histogram.Bucket bucket : reduced.getBuckets()) { - actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), - (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); - } - assertEquals(expectedCounts, actualCounts); - } - - @Override - protected Writeable.Reader instanceReader() { - return InternalAutoDateHistogram::new; - } - - @Override - protected Class implementationClass() { - return ParsedAutoDateHistogram.class; - } - - @Override - protected InternalAutoDateHistogram mutateInstance(InternalAutoDateHistogram instance) { - String name = instance.getName(); - List buckets = instance.getBuckets(); - int targetBuckets = instance.getTargetBuckets(); - BucketInfo bucketInfo = instance.getBucketInfo(); - List pipelineAggregators = instance.pipelineAggregators(); - Map metaData = instance.getMetaData(); - switch (between(0, 3)) { - case 0: - name += randomAlphaOfLength(5); - break; - case 1: - buckets = new ArrayList<>(buckets); - buckets.add(new InternalAutoDateHistogram.Bucket(randomNonNegativeLong(), randomIntBetween(1, 100), format, - InternalAggregations.EMPTY)); - break; - case 2: - int roundingIdx = bucketInfo.roundingIdx == bucketInfo.roundingInfos.length - 1 ? 0 : bucketInfo.roundingIdx + 1; - bucketInfo = new BucketInfo(bucketInfo.roundingInfos, roundingIdx, bucketInfo.emptySubAggregations); - break; - case 3: - if (metaData == null) { - metaData = new HashMap<>(1); - } else { - metaData = new HashMap<>(instance.getMetaData()); - } - metaData.put(randomAlphaOfLength(15), randomInt()); - break; - default: - throw new AssertionError("Illegal randomisation branch"); - } - return new InternalAutoDateHistogram(name, buckets, targetBuckets, bucketInfo, format, pipelineAggregators, metaData); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index 612c1342cc5d3..67eba5281d9b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -85,6 +85,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.elasticsearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; /** * Base class for testing {@link Aggregator} implementations. @@ -226,7 +227,7 @@ public boolean shouldCache(Query query) throws IOException { }); when(searchContext.bitsetFilterCache()).thenReturn(new BitsetFilterCache(indexSettings, mock(Listener.class))); doAnswer(invocation -> { - /* Store the release-ables so we can release them at the end of the test case. This is important because aggregations don't + /* Store the releasables so we can release them at the end of the test case. This is important because aggregations don't * close their sub-aggregations. This is fairly similar to what the production code does. 
         */
        releasables.add((Releasable) invocation.getArguments()[0]);
        return null;
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
index 15e44853a97ba..838b0e315ea0e 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java
@@ -53,10 +53,8 @@
 import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
-import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
 import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.histogram.ParsedAutoDateHistogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.ParsedDateHistogram;
 import org.elasticsearch.search.aggregations.bucket.histogram.ParsedHistogram;
 import org.elasticsearch.search.aggregations.bucket.missing.MissingAggregationBuilder;
@@ -183,7 +181,6 @@ public abstract class InternalAggregationTestCase
         map.put(GeoCentroidAggregationBuilder.NAME, (p, c) -> ParsedGeoCentroid.fromXContent(p, (String) c));
         map.put(HistogramAggregationBuilder.NAME, (p, c) -> ParsedHistogram.fromXContent(p, (String) c));
         map.put(DateHistogramAggregationBuilder.NAME, (p, c) -> ParsedDateHistogram.fromXContent(p, (String) c));
-        map.put(AutoDateHistogramAggregationBuilder.NAME, (p, c) -> ParsedAutoDateHistogram.fromXContent(p, (String) c));
         map.put(StringTerms.NAME, (p, c) -> ParsedStringTerms.fromXContent(p, (String) c));
         map.put(LongTerms.NAME, (p, c) -> ParsedLongTerms.fromXContent(p, (String) c));
         map.put(DoubleTerms.NAME, (p, c) -> ParsedDoubleTerms.fromXContent(p, (String) c));
diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
index 6f0aebe23966b..952b6c027945b 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalMultiBucketAggregationTestCase.java
@@ -149,8 +149,7 @@ private void assertMultiBucketsAggregations(Aggregation expected, Aggregation ac
     protected void assertMultiBucketsAggregation(MultiBucketsAggregation expected, MultiBucketsAggregation actual, boolean checkOrder) {
         Class parsedClass = implementationClass();
         assertNotNull("Parsed aggregation class must not be null", parsedClass);
-        assertTrue("Unexpected parsed class, expected instance of: " + actual + ", but was: " + parsedClass,
-                parsedClass.isInstance(actual));
+        assertTrue(parsedClass.isInstance(actual));
         assertTrue(expected instanceof InternalAggregation);
         assertEquals(expected.getName(), actual.getName());

From 9036c6114427eb6602aee929a1991d4cd0b17688 Mon Sep 17 00:00:00 2001
From: DeDe Morton
Date: Mon, 16 Jul 2018 12:11:24 -0700
Subject: [PATCH 13/13] Use correct formatting for links (#29460)

---
 docs/reference/how-to/recipes.asciidoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/reference/how-to/recipes.asciidoc b/docs/reference/how-to/recipes.asciidoc
index e798f8819d000..451e192ad6ad2 100644
--- a/docs/reference/how-to/recipes.asciidoc
+++ b/docs/reference/how-to/recipes.asciidoc
@@ -3,8 +3,8 @@

 This section includes a few recipes to help with common problems:

-* mixing-exact-search-with-stemming
-* consistent-scoring
+* <<mixing-exact-search-with-stemming>>
+* <<consistent-scoring>>

 include::recipes/stemming.asciidoc[]
 include::recipes/scoring.asciidoc[]
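
Note on the auto_date_histogram test removal above: the deleted AutoDateHistogramAggregatorTests all encode one rule — the aggregation keeps widening its rounding (seconds, minutes, hours, days, months, years, using the 1/5/10/30, 1/5/10/30, 1/3/12, 1/7, 1/3 and 1/10/20/50/100 multiples set up in InternalAutoDateHistogramTests) until the data fits into the requested number of buckets. The sketch below is a minimal, standalone approximation of that selection written for this note; the class name, the flat millisecond ladder, and the fixed 30-day month and 365-day year lengths are assumptions made here, not the aggregator's actual Rounding-based, shard-reduced implementation.

["source","java"]
--------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

// Standalone sketch of the interval selection the deleted tests assert.
// The ladder mirrors the RoundingInfo multiples from InternalAutoDateHistogramTests;
// fixed-length months and years are simplifications made for this note.
public class AutoIntervalSketch {

    private static final long SECOND = TimeUnit.SECONDS.toMillis(1);
    private static final long MINUTE = TimeUnit.MINUTES.toMillis(1);
    private static final long HOUR = TimeUnit.HOURS.toMillis(1);
    private static final long DAY = TimeUnit.DAYS.toMillis(1);
    private static final long MONTH = 30 * DAY;  // approximation; the real code rounds on calendar boundaries
    private static final long YEAR = 365 * DAY;  // approximation

    private static final long[] LADDER = {
        SECOND, 5 * SECOND, 10 * SECOND, 30 * SECOND,
        MINUTE, 5 * MINUTE, 10 * MINUTE, 30 * MINUTE,
        HOUR, 3 * HOUR, 12 * HOUR,
        DAY, 7 * DAY,
        MONTH, 3 * MONTH,
        YEAR, 10 * YEAR, 20 * YEAR, 50 * YEAR, 100 * YEAR
    };

    /** Smallest interval from the ladder that keeps the bucket count at or below the target. */
    static long chooseInterval(List<Long> timestamps, int targetBuckets) {
        long min = timestamps.stream().mapToLong(Long::longValue).min().orElse(0L);
        long max = timestamps.stream().mapToLong(Long::longValue).max().orElse(0L);
        for (long interval : LADDER) {
            long buckets = (max / interval) - (min / interval) + 1;
            if (buckets <= targetBuckets) {
                return interval;
            }
        }
        return LADDER[LADDER.length - 1];
    }

    public static void main(String[] args) {
        // 600 documents indexed one hour apart, as in the hourly-interval assertions above.
        List<Long> hourly = new ArrayList<>();
        for (int i = 0; i < 600; i++) {
            hourly.add(i * HOUR);
        }
        System.out.println(chooseInterval(hourly, 300) / HOUR); // 3  -> 200 buckets of 3 docs
        System.out.println(chooseInterval(hourly, 100) / HOUR); // 12 -> 50 buckets of 12 docs
        System.out.println(chooseInterval(hourly, 30) / HOUR);  // 24 -> 25 daily buckets of 24 docs
    }
}
--------------------------------------------------

Run against 600 timestamps spaced one hour apart, the sketch reproduces the 200 x 3-hour, 50 x 12-hour and 25 x 1-day bucketings asserted at the top of this hunk, which is the behaviour these deleted tests (and their re-addition in a later change) are checking.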