From 51cab98a674bef95fe358ddfd7741489482e1212 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 09:44:18 -0500 Subject: [PATCH 01/81] Bump com.gradle.enterprise from 3.15.1 to 3.16.1 (#11629) * Bump com.gradle.enterprise from 3.15.1 to 3.16.1 Bumps com.gradle.enterprise from 3.15.1 to 3.16.1. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- settings.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c5ea3f83bffc7..2ce89159e4783 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -143,7 +143,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) - Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) - Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)) -- Bump `com.gradle.enterprise` from 3.14.1 to 3.15.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339)) +- Bump `com.gradle.enterprise` from 3.14.1 to 3.16.1 ([#11339](https://github.com/opensearch-project/OpenSearch/pull/11339), [#11629](https://github.com/opensearch-project/OpenSearch/pull/11629)) - Bump `actions/setup-java` from 3 to 4 ([#11447](https://github.com/opensearch-project/OpenSearch/pull/11447)) - Bump `commons-net:commons-net` from 3.9.0 to 3.10.0 ([#11450](https://github.com/opensearch-project/OpenSearch/pull/11450)) - Bump `org.apache.maven:maven-model` from 3.9.4 to 3.9.6 ([#11445](https://github.com/opensearch-project/OpenSearch/pull/11445)) diff --git a/settings.gradle b/settings.gradle index 139d45013710f..24ab4a7a22237 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.15.1" + id "com.gradle.enterprise" version "3.16.1" } ext.disableBuildCache = hasProperty('DISABLE_BUILD_CACHE') || System.getenv().containsKey('DISABLE_BUILD_CACHE') From 2b1c9ae3c4109e30a6e6a30553ea915a61bd5ba7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E4=BD=95=E5=BB=B6=E9=BE=99?= Date: Mon, 18 Dec 2023 23:42:39 +0800 Subject: [PATCH 02/81] Fix automatic addition of protocol in discovery endpoint (#11612) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix #11609 Signed-off-by: Yanlong He * Fix #11609 Signed-off-by: heyanlong * Update AwsEc2ServiceImplTests.java Signed-off-by: 何延龙 * spotless Signed-off-by: heyanlong --------- Signed-off-by: Yanlong He Signed-off-by: heyanlong Signed-off-by: 何延龙 --- CHANGELOG.md | 1 + .../org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java | 2 +- .../opensearch/discovery/ec2/AwsEc2ServiceImplTests.java | 6 ++++++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ce89159e4783..15d4374d27929 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -195,6 +195,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix remote shards 
balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) - Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) +- Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) ### Security diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java index 8d31403b77c36..a2e920761b655 100644 --- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -114,7 +114,7 @@ protected String getFullEndpoint(String endpoint) { if (!Strings.hasText(endpoint)) { return null; } - if (endpoint.startsWith("http")) { + if (endpoint.startsWith("http://") || endpoint.startsWith("https://")) { return endpoint; } diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java index 327a7da2c4cab..3164abe456515 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/AwsEc2ServiceImplTests.java @@ -211,6 +211,10 @@ public void testGetFullEndpointWithScheme() { String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint); assertEquals("http://ec2.us-west-2.amazonaws.com", endpoint); + + assertEquals("http://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("http://httpserver.example.com")); + + assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("https://httpserver.example.com")); } public void testGetFullEndpointWithoutScheme() { @@ -222,6 +226,8 @@ public void testGetFullEndpointWithoutScheme() { String endpoint = awsEc2ServiceImpl.getFullEndpoint(clientSettings.endpoint); assertEquals("https://ec2.us-west-2.amazonaws.com", endpoint); + assertEquals("https://httpserver.example.com", awsEc2ServiceImpl.getFullEndpoint("httpserver.example.com")); + assertNull(awsEc2ServiceImpl.getFullEndpoint("")); } } From 9146c19c4a2d8cea2e882dfd938cf80d04789f04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 15:15:06 -0500 Subject: [PATCH 03/81] Bump com.squareup.okio:okio from 3.6.0 to 3.7.0 in /test/fixtures/hdfs-fixture (#11632) * Bump com.squareup.okio:okio in /test/fixtures/hdfs-fixture Bumps [com.squareup.okio:okio](https://github.com/square/okio) from 3.6.0 to 3.7.0. - [Changelog](https://github.com/square/okio/blob/master/CHANGELOG.md) - [Commits](https://github.com/square/okio/compare/parent-3.6.0...parent-3.7.0) --- updated-dependencies: - dependency-name: com.squareup.okio:okio dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 15d4374d27929..5fe1fc9f5e165 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -132,7 +132,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) -- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637)) +- Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) - Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270)) - Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) - Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index bb9d70ac39398..7adf29792f27d 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -76,6 +76,6 @@ dependencies { runtimeOnly("com.squareup.okhttp3:okhttp:4.12.0") { exclude group: "com.squareup.okio" } - runtimeOnly "com.squareup.okio:okio:3.6.0" + runtimeOnly "com.squareup.okio:okio:3.7.0" runtimeOnly "org.xerial.snappy:snappy-java:1.1.10.5" } From 217c382fc53196460fbc9ba52ee93afc4a7b361a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 15:56:42 -0600 Subject: [PATCH 04/81] Bump actions/download-artifact from 3 to 4 (#11631) Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4. 
- [Release notes](https://github.com/actions/download-artifact/releases) - [Commits](https://github.com/actions/download-artifact/compare/v3...v4) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/check-compatibility.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index d93f7e73b91e7..f400991366da1 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -48,7 +48,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download results - uses: actions/download-artifact@v3 + uses: actions/download-artifact@v4 with: name: results.txt From 37dae8265e76c1a2da1aa56aeb3a6ce18005c4a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 17:42:04 -0500 Subject: [PATCH 05/81] Bump com.netflix.nebula.ospackage-base from 11.5.0 to 11.6.0 in /distribution/packages (#11630) * Bump com.netflix.nebula.ospackage-base in /distribution/packages Bumps com.netflix.nebula.ospackage-base from 11.5.0 to 11.6.0. --- updated-dependencies: - dependency-name: com.netflix.nebula.ospackage-base dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- distribution/packages/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fe1fc9f5e165..ebad434b8fd5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,7 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) -- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.5.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295)) +- Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) - Bump `de.thetaphi:forbiddenapis` from 3.5.1 to 3.6 ([#10508](https://github.com/opensearch-project/OpenSearch/pull/10508)) - Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639)) diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index cb05661dc74a4..ededa7bff34d8 100644 --- 
a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -63,7 +63,7 @@ import java.util.regex.Pattern */ plugins { - id "com.netflix.nebula.ospackage-base" version "11.5.0" + id "com.netflix.nebula.ospackage-base" version "11.6.0" } void addProcessFilesTask(String type, boolean jdk) { From 863d45370d62868d3d5a926798571efffc3b90d2 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 19 Dec 2023 09:48:11 -0500 Subject: [PATCH 06/81] Add additional handling in SearchTemplateRequest when simulate is set to true (#11591) Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../script/mustache/SearchTemplateRequest.java | 9 +++++++++ .../mustache/SearchTemplateRequestTests.java | 16 ++++++++++++++++ 3 files changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ebad434b8fd5f..52b89e57c0959 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -121,6 +121,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) - [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) - Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) ### Dependencies - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java index 1aabea30fc651..d02c5f1efa591 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/SearchTemplateRequest.java @@ -259,16 +259,25 @@ public void writeTo(StreamOutput out) throws IOException { @Override public String[] indices() { + if (request == null) { + return new String[0]; + } return request.indices(); } @Override public IndicesOptions indicesOptions() { + if (request == null) { + return SearchRequest.DEFAULT_INDICES_OPTIONS; + } return request.indicesOptions(); } @Override public IndicesRequest indices(String... 
indices) { + if (request == null) { + return new SearchRequest(new String[0]).indices(indices); + } return request.indices(indices); } } diff --git a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java index 72443d1323b44..71ce616fd5d94 100644 --- a/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/opensearch/script/mustache/SearchTemplateRequestTests.java @@ -32,6 +32,7 @@ package org.opensearch.script.mustache; +import org.opensearch.action.search.SearchRequest; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.script.ScriptType; import org.opensearch.search.RandomSearchRequestGenerator; @@ -110,4 +111,19 @@ public static SearchTemplateRequest createRandomRequest() { request.setRequest(RandomSearchRequestGenerator.randomSearchRequest(SearchSourceBuilder::searchSource)); return request; } + + public void testSimulatedSearchTemplateRequest() { + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setSimulate(true); + + assertEquals(0, request.indices().length); + assertEquals(SearchRequest.DEFAULT_INDICES_OPTIONS, request.indicesOptions()); + assertEquals(2, request.indices("index1", "index2").indices().length); + + SearchTemplateRequest randomRequest = createRandomRequest(); + int expectedIndicesLength = randomRequest.indices().length; + request.setSimulate(true); + + assertEquals(expectedIndicesLength, randomRequest.indices().length); + } } From f1243eafdac3bde3a037c2b2b53d52819020b483 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 19 Dec 2023 09:08:46 -0600 Subject: [PATCH 07/81] Bump actions/upload-artifact from 3 to 4 (#11647) Signed-off-by: Peter Nied --- .github/workflows/check-compatibility.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml index f400991366da1..d6c65ddd446cd 100644 --- a/.github/workflows/check-compatibility.yml +++ b/.github/workflows/check-compatibility.yml @@ -36,7 +36,7 @@ jobs: echo "### Compatible components" >> "${{ github.workspace }}/results.txt" && grep -e 'Compatible component' $HOME/gradlew-check.out | sed -e 's/Compatible component: \[\(.*\)\]/- \1/' >> "${{ github.workspace }}/results.txt" - name: Upload results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: results.txt path: ${{ github.workspace }}/results.txt From 2c8ee1947b55a1cc5bc1a114b82e3a3b8a99851e Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 19 Dec 2023 14:15:59 -0600 Subject: [PATCH 08/81] Add context for custom GC tunings (#11651) Signed-off-by: Andrew Ross --- distribution/src/config/jvm.options | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 1a0abcbaf9c88..f0ac98faffda9 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -38,12 +38,12 @@ 8-10:-XX:+UseCMSInitiatingOccupancyOnly ## G1GC Configuration -# NOTE: G1 GC is only supported on JDK version 10 or later -# to use G1GC, uncomment the next two lines and update the version on the -# following three lines to your version of the JDK -# 10:-XX:-UseConcMarkSweepGC -# 10:-XX:-UseCMSInitiatingOccupancyOnly +# NOTE: G1GC is the default GC for all JDKs 11 and 
newer 11-:-XX:+UseG1GC +# See https://github.com/elastic/elasticsearch/pull/46169 for the history +# behind these settings, but the tl;dr is that default values can lead +# to situations where heap usage grows enough to trigger a circuit breaker +# before GC kicks in. 11-:-XX:G1ReservePercent=25 11-:-XX:InitiatingHeapOccupancyPercent=30 From 6a6ab32e532d16de222645ccfae376c778c68991 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Thu, 21 Dec 2023 11:52:22 -0800 Subject: [PATCH 09/81] [Segment Replication] Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations (#11583) * Add new cluster level setting that prevents index level replication type setting overrides Signed-off-by: Suraj Singh * Block restore on mis-matching replication type setting Signed-off-by: Suraj Singh * Address review comments and rebase Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh * Use appropriate variable names Signed-off-by: Suraj Singh * Fix failing integ test Signed-off-by: Suraj Singh --------- Signed-off-by: Suraj Singh --- CHANGELOG.md | 1 + .../SegmentReplicationClusterSettingIT.java | 142 +++++++++++++++++- .../SegmentReplicationSnapshotIT.java | 63 ++++++++ .../metadata/MetadataCreateIndexService.java | 29 ++++ .../common/settings/ClusterSettings.java | 3 +- .../opensearch/indices/IndicesService.java | 13 ++ .../MetadataCreateIndexServiceTests.java | 104 +++++++++++++ 7 files changed, 346 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52b89e57c0959..ca10ecf57e2b6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,6 +122,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) - Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) +- Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) ### Dependencies - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index a82fd8d845709..c4e8ccfc0ecec 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -10,7 +10,10 @@ import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.opensearch.action.admin.indices.shrink.ResizeType; +import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import 
org.opensearch.core.index.Index; import org.opensearch.index.IndexModule; @@ -18,8 +21,15 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Locale; + import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; +import static org.hamcrest.Matchers.hasSize; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase { @@ -29,6 +39,9 @@ public class SegmentReplicationClusterSettingIT extends OpenSearchIntegTestCase protected static final int SHARD_COUNT = 1; protected static final int REPLICA_COUNT = 1; + protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + @Override public Settings indexSettings() { return Settings.builder() @@ -44,14 +57,6 @@ protected boolean addMockInternalEngine() { return false; } - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); - } - public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Exception { Settings settings = Settings.builder().put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final String ANOTHER_INDEX = "test-index"; @@ -123,4 +128,125 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } + public void testReplicationTypesOverrideNotAllowed_IndexAPI() { + // Generate mutually exclusive replication strategies at cluster and index level + List replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType indexLevelReplication = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + Settings indexSettings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, indexLevelReplication).build(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME, indexSettings)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + public void testReplicationTypesOverrideNotAllowed_WithTemplates() { + // Generate mutually exclusive replication strategies at cluster and index level + List replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType templateReplicationType = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + 
.put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + logger.info( + "--> Create index with template replication {} and cluster level replication {}", + templateReplicationType, + clusterLevelReplication + ); + // Create index template + client().admin() + .indices() + .preparePutTemplate("template_1") + .setPatterns(Collections.singletonList("test-idx*")) + .setSettings(Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, templateReplicationType).build()) + .setOrder(0) + .get(); + + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates().get(); + assertThat(response.getIndexTemplates(), hasSize(1)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> createIndex(INDEX_NAME)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + public void testReplicationTypesOverrideNotAllowed_WithResizeAction() { + // Generate mutually exclusive replication strategies at cluster and index level + List replicationStrategies = getRandomReplicationTypesAsList(); + ReplicationType clusterLevelReplication = replicationStrategies.get(0); + ReplicationType indexLevelReplication = replicationStrategies.get(1); + Settings nodeSettings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, clusterLevelReplication) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + internalCluster().startClusterManagerOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + internalCluster().startDataOnlyNode(nodeSettings); + logger.info( + "--> Create index with index level replication {} and cluster level replication {}", + indexLevelReplication, + clusterLevelReplication + ); + + // Define resize action and target shard count. 
+ List> resizeActionsList = new ArrayList<>(); + final int initialShardCount = 2; + resizeActionsList.add(new Tuple<>(ResizeType.SPLIT, 2 * initialShardCount)); + resizeActionsList.add(new Tuple<>(ResizeType.SHRINK, SHARD_COUNT)); + resizeActionsList.add(new Tuple<>(ResizeType.CLONE, initialShardCount)); + + Tuple resizeActionTuple = resizeActionsList.get(random().nextInt(resizeActionsList.size())); + final String targetIndexName = resizeActionTuple.v1().name().toLowerCase(Locale.ROOT) + "-target"; + + logger.info("--> Performing resize action {} with shard count {}", resizeActionTuple.v1(), resizeActionTuple.v2()); + + Settings indexSettings = Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, initialShardCount) + .put(SETTING_REPLICATION_TYPE, clusterLevelReplication) + .build(); + createIndex(INDEX_NAME, indexSettings); + + // Block writes + client().admin().indices().prepareUpdateSettings(INDEX_NAME).setSettings(Settings.builder().put("index.blocks.write", true)).get(); + ensureGreen(); + + // Validate resize action fails + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex(INDEX_NAME, targetIndexName) + .setResizeType(resizeActionTuple.v1()) + .setSettings( + Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", resizeActionTuple.v2()) + .putNull("index.blocks.write") + .put(SETTING_REPLICATION_TYPE, indexLevelReplication) + .build() + ) + .get() + ); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } + + /** + * Generate a list of ReplicationType with random ordering + * + * @return List of ReplicationType values + */ + private List getRandomReplicationTypesAsList() { + List replicationStrategies = List.of(ReplicationType.SEGMENT, ReplicationType.DOCUMENT); + int randomReplicationIndex = random().nextInt(replicationStrategies.size()); + ReplicationType clusterLevelReplication = replicationStrategies.get(randomReplicationIndex); + ReplicationType indexLevelReplication = replicationStrategies.get(1 - randomReplicationIndex); + return List.of(clusterLevelReplication, indexLevelReplication); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index c2ce7e48f92d2..2c12c0abb202b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; import static org.opensearch.indices.replication.SegmentReplicationBaseIT.waitForSearchableDocs; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -47,6 +48,9 @@ public class SegmentReplicationSnapshotIT extends AbstractSnapshotIntegTestCase private static final String REPOSITORY_NAME = "test-segrep-repo"; private static final String SNAPSHOT_NAME = "test-segrep-snapshot"; + protected static final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting 
[index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + public Settings segRepEnableIndexSettings() { return getShardSettings().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); } @@ -306,4 +310,63 @@ public void testSnapshotRestoreOnIndexWithSegRepClusterSetting() throws Exceptio IndicesService indicesService = internalCluster().getInstance(IndicesService.class); assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); } + + /** + * 1. Create index in DOCUMENT replication type + * 2. Snapshot index + * 3. Add new set of nodes with `cluster.indices.replication.strategy` set to SEGMENT and `cluster.index.restrict.replication.type` + * set to true. + * 4. Perform restore on new set of nodes to validate restored index has `DOCUMENT` replication. + */ + public void testSnapshotRestoreOnRestrictReplicationSetting() throws Exception { + final int documentCount = scaledRandomIntBetween(1, 10); + String originalClusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + + // Starting two nodes with primary and replica shards respectively. + final String primaryNode = internalCluster().startDataOnlyNode(); + prepareCreate( + INDEX_NAME, + Settings.builder() + // we want to override cluster replication setting by passing a index replication setting + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) + ).get(); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replicaNode = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + for (int i = 0; i < documentCount; i++) { + client().prepareIndex(INDEX_NAME).setId(String.valueOf(i)).setSource("foo", "bar").get(); + } + + createSnapshot(); + + // Delete index + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); + assertFalse("index [" + INDEX_NAME + "] should have been deleted", indexExists(INDEX_NAME)); + + // Start new set of nodes with cluster level replication type setting and restrict replication type setting. 
+ Settings settings = Settings.builder() + .put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + + // Start new cluster manager node + String newClusterManagerNode = internalCluster().startClusterManagerOnlyNode(settings); + + // Remove older nodes + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(originalClusterManagerNode)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); + + String newPrimaryNode = internalCluster().startDataOnlyNode(settings); + String newReplicaNode = internalCluster().startDataOnlyNode(settings); + + // Perform snapshot restore + logger.info("--> Performing snapshot restore to target index"); + + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> restoreSnapshotWithSettings(null)); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getMessage()); + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index bb1bf94f5e984..3384393d8feaf 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -907,6 +907,10 @@ static Settings aggregateIndexSettings( ); } + List validationErrors = new ArrayList<>(); + validateIndexReplicationTypeSettings(indexSettingsBuilder.build(), clusterSettings).ifPresent(validationErrors::add); + validateErrors(request.index(), validationErrors); + Settings indexSettings = indexSettingsBuilder.build(); /* * We can not validate settings until we have applied templates, otherwise we do not know the actual settings @@ -1246,7 +1250,11 @@ private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState public void validateIndexSettings(String indexName, final Settings settings, final boolean forbidPrivateIndexSettings) throws IndexCreationException { List validationErrors = getIndexSettingsValidationErrors(settings, forbidPrivateIndexSettings, indexName); + validateIndexReplicationTypeSettings(settings, clusterService.getClusterSettings()).ifPresent(validationErrors::add); + validateErrors(indexName, validationErrors); + } + private static void validateErrors(String indexName, List validationErrors) { if (validationErrors.isEmpty() == false) { ValidationException validationException = new ValidationException(); validationException.addValidationErrors(validationErrors); @@ -1322,6 +1330,27 @@ private static List validateIndexCustomPath(Settings settings, @Nullable return validationErrors; } + /** + * Validates {@code index.replication.type} is matches with cluster level setting {@code cluster.indices.replication.strategy} + * when {@code cluster.index.restrict.replication.type} is set to true. 
+ * + * @param requestSettings settings passed in during index create request + * @param clusterSettings cluster setting + */ + private static Optional validateIndexReplicationTypeSettings(Settings requestSettings, ClusterSettings clusterSettings) { + if (clusterSettings.get(IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING) + && requestSettings.hasValue(SETTING_REPLICATION_TYPE) + && requestSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey()) + .equals(clusterSettings.get(CLUSTER_REPLICATION_TYPE_SETTING).name()) == false) { + return Optional.of( + "index setting [index.replication.type] is not allowed to be set as [" + + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey() + + "=true]" + ); + } + return Optional.empty(); + } + /** * Validates the settings and mappings for shrinking an index. * diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index ab0ea89f4734d..fa4b0f475edc5 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -701,7 +701,8 @@ public void apply(Settings value, Settings current, Settings previous) { AdmissionControlSettings.ADMISSION_CONTROL_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.CPU_BASED_ADMISSION_CONTROLLER_TRANSPORT_LAYER_MODE, CpuBasedAdmissionControllerSettings.INDEXING_CPU_USAGE_LIMIT, - CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT + CpuBasedAdmissionControllerSettings.SEARCH_CPU_USAGE_LIMIT, + IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 3d1794f8d3197..5c3beaf8509bd 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -301,6 +301,19 @@ public class IndicesService extends AbstractLifecycleComponent Property.Final ); + /** + * If enabled, this setting enforces that indexes will be created with a replication type matching the cluster setting + * defined in cluster.indices.replication.strategy by rejecting any request that specifies a replication type that + * does not match the cluster setting. If disabled, a user can choose a replication type on a per-index basis using + * the index.replication.type setting. + */ + public static final Setting CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING = Setting.boolSetting( + "cluster.index.restrict.replication.type", + false, + Property.NodeScope, + Property.Final + ); + /** * The node's settings. 
*/ diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index c4a782209421b..cea151748bfb6 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -71,6 +71,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.IndexCreationException; import org.opensearch.indices.IndicesService; import org.opensearch.indices.InvalidAliasNameException; import org.opensearch.indices.InvalidIndexNameException; @@ -137,6 +138,7 @@ import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.index.IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_INDEX_RESTRICT_ASYNC_DURABILITY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; @@ -166,6 +168,9 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; + final String REPLICATION_MISMATCH_VALIDATION_ERROR = + "Validation Failed: 1: index setting [index.replication.type] is not allowed to be set as [cluster.index.restrict.replication.type=true];"; + @Before public void setup() throws Exception { super.setUp(); @@ -1239,6 +1244,105 @@ public void testIndexTemplateReplicationType() { assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey())); } + public void testClusterForceReplicationTypeInAggregateSettings() { + Settings settings = Settings.builder() + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + Settings nonMatchingReplicationIndexSettings = Settings.builder() + .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) + .build(); + request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); + request.settings(nonMatchingReplicationIndexSettings); + IndexCreationException exception = expectThrows( + IndexCreationException.class, + () -> aggregateIndexSettings( + ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ) + ); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage()); + + Settings matchingReplicationIndexSettings = Settings.builder() + .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .build(); + request.settings(matchingReplicationIndexSettings); + Settings aggregateIndexSettings = aggregateIndexSettings( + 
ClusterState.EMPTY_STATE, + request, + Settings.EMPTY, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Collections.emptySet(), + clusterSettings + ); + assertEquals(ReplicationType.SEGMENT.toString(), aggregateIndexSettings.get(INDEX_REPLICATION_TYPE_SETTING.getKey())); + } + + public void testClusterForceReplicationTypeInValidateIndexSettings() { + ClusterService clusterService = mock(ClusterService.class); + Metadata metadata = Metadata.builder() + .transientSettings(Settings.builder().put(Metadata.DEFAULT_REPLICA_COUNT_SETTING.getKey(), 1).build()) + .build(); + ClusterState clusterState = ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .build(); + ThreadPool threadPool = new TestThreadPool(getTestName()); + // Enforce cluster level replication type setting + final Settings forceClusterSettingEnabled = Settings.builder() + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), true) + .build(); + ClusterSettings clusterSettings = new ClusterSettings(forceClusterSettingEnabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getSettings()).thenReturn(forceClusterSettingEnabled); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + when(clusterService.state()).thenReturn(clusterState); + + final MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + forceClusterSettingEnabled, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(forceClusterSettingEnabled, clusterService.getClusterSettings()) + ); + // Use DOCUMENT replication type setting for index creation + final Settings indexSettings = Settings.builder().put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT).build(); + + IndexCreationException exception = expectThrows( + IndexCreationException.class, + () -> checkerService.validateIndexSettings("test", indexSettings, false) + ); + assertEquals(REPLICATION_MISMATCH_VALIDATION_ERROR, exception.getCause().getMessage()); + + // Cluster level replication type setting not enforced + final Settings forceClusterSettingDisabled = Settings.builder() + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_INDEX_RESTRICT_REPLICATION_TYPE_SETTING.getKey(), false) + .build(); + clusterSettings = new ClusterSettings(forceClusterSettingDisabled, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + when(clusterService.getClusterSettings()).thenReturn(clusterSettings); + checkerService.validateIndexSettings("test", indexSettings, false); + threadPool.shutdown(); + } + public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) From 02beac28363ef43f2e4437496e0158ec035e4f63 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 21 Dec 2023 15:34:12 -0600 Subject: [PATCH 10/81] Fix CHANGELOG entries that were added to wrong section (#11662) Signed-off-by: Andrew Ross --- CHANGELOG.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ca10ecf57e2b6..3c4a6f198e286 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,14 +9,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add events correlation engine plugin ([#6854](https://github.com/opensearch-project/OpenSearch/issues/6854)) -- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) ([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - [Admission Control] Integrate CPU AC with ResourceUsageCollector and add CPU AC stats to nodes/stats ([#10887](https://github.com/opensearch-project/OpenSearch/pull/10887)) -- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378)) ### Dependencies - Bump `log4j-core` from 2.18.0 to 2.19.0 @@ -46,7 +44,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([#8247](https://github.com/opensearch-project/OpenSearch/pull/8247)) - Bump JNA version from 5.5 to 5.13 ([#9963](https://github.com/opensearch-project/OpenSearch/pull/9963)) -- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump `org.eclipse.jgit` from 6.5.0 to 6.7.0 ([#10147](https://github.com/opensearch-project/OpenSearch/pull/10147)) - Bump OpenTelemetry from 1.30.1 to 1.31.0 ([#10617](https://github.com/opensearch-project/OpenSearch/pull/10617)) - Bump OpenTelemetry from 1.31.0 to 1.32.0 and OpenTelemetry Semconv from 1.21.0-alpha to 1.23.1-alpha ([#11305](https://github.com/opensearch-project/OpenSearch/pull/11305)) @@ -59,7 +56,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792)) - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) -- Performance improvement for Datetime field 
caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) @@ -85,8 +81,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) -- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) -- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) ### Security @@ -120,11 +114,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Allow changing number of replicas of searchable snapshot index ([#11317](https://github.com/opensearch-project/OpenSearch/pull/11317)) - Adding slf4j license header to LoggerMessageFormat.java ([#11069](https://github.com/opensearch-project/OpenSearch/pull/11069)) - [BWC and API enforcement] Introduce checks for enforcing the API restrictions ([#11175](https://github.com/opensearch-project/OpenSearch/pull/11175)) +- Maintainer approval check ([#11378](https://github.com/opensearch-project/OpenSearch/pull/11378)) - Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) ### Dependencies +- Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) - Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) @@ -160,20 +156,22 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) - Add the means to extract the contextual properties from HttpChannel, TcpCChannel and 
TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) - [BUG] Fix java.lang.SecurityException in repository-gcs plugin ([#10642](https://github.com/opensearch-project/OpenSearch/pull/10642)) - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) +- Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) - Add instrumentation for indexing in transport bulk action and transport shard bulk action. ([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) - Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895)) -- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) +- Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) - Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) - Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) - Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) -- Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) - Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) - Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) @@ -188,14 +186,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) -- Fix 
SuggestSearch.testSkipDuplicates by forceing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) - Delegating CachingWeightWrapper#count to internal weight object ([#10543](https://github.com/opensearch-project/OpenSearch/pull/10543)) - Fix per request latency last phase not tracked ([#10934](https://github.com/opensearch-project/OpenSearch/pull/10934)) -- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) +- Fix SuggestSearch.testSkipDuplicates by forcing refresh when indexing its test documents ([#11068](https://github.com/opensearch-project/OpenSearch/pull/11068)) +- [BUG] Fix the thread context that is not properly cleared and messes up the traces ([#10873](https://github.com/opensearch-project/OpenSearch/pull/10873)) +- Handle canMatchSearchAfter for frozen context scenario ([#11249](https://github.com/opensearch-project/OpenSearch/pull/11249)) - Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) - Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) - Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) - Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) +- Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) - Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) From a3524b3351a6261fdeb5610f7eac02a7d327247a Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 21 Dec 2023 15:44:39 -0600 Subject: [PATCH 11/81] Remove unused java11 gradle configuration (#11661) The java11-specific sources were removed back in #2898 and as far as I can tell this configuration has been unneeded since that point. 
Signed-off-by: Andrew Ross --- server/build.gradle | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/server/build.gradle b/server/build.gradle index bf0c7791aeb55..e36498bf1038b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -57,45 +57,6 @@ sourceSets { } } } -// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs -if (!isEclipse) { - sourceSets { - java11 { - java { - srcDirs = ['src/main/java11'] - } - } - } - - configurations { - java11Implementation.extendsFrom(api) - } - - dependencies { - java11Implementation sourceSets.main.output - } - - compileJava11Java { - sourceCompatibility = JavaVersion.VERSION_11 - targetCompatibility = JavaVersion.VERSION_11 - } - - tasks.named('forbiddenApisJava11').configure { - doFirst { - if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) { - targetCompatibility = JavaVersion.VERSION_11 - } - } - } - - jar { - metaInf { - into 'versions/11' - from sourceSets.java11.output - } - manifest.attributes('Multi-Release': 'true') - } -} dependencies { From fe46dde351835f34f59c5e357e0a3b71d9a31f18 Mon Sep 17 00:00:00 2001 From: Taeik Lim Date: Fri, 22 Dec 2023 07:15:46 +0900 Subject: [PATCH 12/81] Change visibility of action constructor (public -> private) (#11659) - MainAction - RemoteInfoAction Signed-off-by: Taeik Lim --- .../action/admin/cluster/remote/RemoteInfoAction.java | 2 +- server/src/main/java/org/opensearch/action/main/MainAction.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java index 55f75a142a53c..7e4911c10c50e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -44,7 +44,7 @@ public final class RemoteInfoAction extends ActionType { public static final String NAME = "cluster:monitor/remote/info"; public static final RemoteInfoAction INSTANCE = new RemoteInfoAction(); - public RemoteInfoAction() { + private RemoteInfoAction() { super(NAME, RemoteInfoResponse::new); } } diff --git a/server/src/main/java/org/opensearch/action/main/MainAction.java b/server/src/main/java/org/opensearch/action/main/MainAction.java index c5cbac824ec83..28a31a92d7f16 100644 --- a/server/src/main/java/org/opensearch/action/main/MainAction.java +++ b/server/src/main/java/org/opensearch/action/main/MainAction.java @@ -44,7 +44,7 @@ public class MainAction extends ActionType { public static final String NAME = "cluster:monitor/main"; public static final MainAction INSTANCE = new MainAction(); - public MainAction() { + private MainAction() { super(NAME, MainResponse::new); } } From f92f846a1f9b30a055dde846fd12d987a511723a Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 21 Dec 2023 18:13:52 -0600 Subject: [PATCH 13/81] Remove unused java11 gradle configuration from lib/core (#11665) This is the same unused java11 configuration that was removed from the server module in #11661. 
Signed-off-by: Andrew Ross --- libs/core/build.gradle | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 4850b5aea5c85..0cf2cd0bf92b6 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -36,45 +36,6 @@ base { archivesName = 'opensearch-core' } -// we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs -if (!isEclipse) { - sourceSets { - java11 { - java { - srcDirs = ['src/main/java11'] - } - } - } - - configurations { - java11Compile.extendsFrom(compile) - } - - dependencies { - java11Implementation sourceSets.main.output - } - - compileJava11Java { - sourceCompatibility = JavaVersion.VERSION_11 - targetCompatibility = JavaVersion.VERSION_11 - } - - forbiddenApisJava11 { - if (BuildParams.runtimeJavaVersion < JavaVersion.VERSION_11) { - targetCompatibility = JavaVersion.VERSION_11 - } - replaceSignatureFiles 'jdk-signatures' - } - - jar { - metaInf { - into 'versions/11' - from sourceSets.java11.output - } - manifest.attributes('Multi-Release': 'true') - } -} - dependencies { api project(':libs:opensearch-common') From f7d291456810603f259a243aee98188be5c7070a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 17:44:16 -0500 Subject: [PATCH 14/81] Bump com.netflix.nebula:nebula-publishing-plugin from 20.3.0 to 21.0.0 (#11671) * Bump com.netflix.nebula:nebula-publishing-plugin from 20.3.0 to 21.0.0 Bumps [com.netflix.nebula:nebula-publishing-plugin](https://github.com/nebula-plugins/nebula-publishing-plugin) from 20.3.0 to 21.0.0. - [Release notes](https://github.com/nebula-plugins/nebula-publishing-plugin/releases) - [Changelog](https://github.com/nebula-plugins/nebula-publishing-plugin/blob/main/CHANGELOG.md) - [Commits](https://github.com/nebula-plugins/nebula-publishing-plugin/compare/v20.3.0...v21.0.0) --- updated-dependencies: - dependency-name: com.netflix.nebula:nebula-publishing-plugin dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c4a6f198e286..dcd645065b488 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -151,6 +151,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.wiremock:wiremock-standalone` from 3.1.0 to 3.3.1 ([#11555](https://github.com/opensearch-project/OpenSearch/pull/11555)) - Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) - Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) +- Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 0efa170250a7b..40ab0801223b1 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -106,7 +106,7 @@ dependencies { api 'org.apache.commons:commons-compress:1.25.0' api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' - api 'com.netflix.nebula:nebula-publishing-plugin:20.3.0' + api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' api 'commons-io:commons-io:2.15.1' From bb4602daa82e7ebfc8b4fba602dd7d36e5e1c241 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 09:32:38 -0500 Subject: [PATCH 15/81] Bump commons-io:commons-io from 2.15.0 to 2.15.1 in /plugins/ingest-attachment (#11560) * Bump commons-io:commons-io in /plugins/ingest-attachment Bumps commons-io:commons-io from 2.15.0 to 2.15.1. --- updated-dependencies: - dependency-name: commons-io:commons-io dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 | 1 - plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index dcd645065b488..c0421f0c1d8fe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -122,7 +122,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) - Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 57a2493053956..22db73ad86796 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -57,7 +57,7 @@ dependencies { runtimeOnly "com.google.guava:guava:${versions.guava}" // Other dependencies api 'org.tukaani:xz:1.9' - api 'commons-io:commons-io:2.15.0' + api 'commons-io:commons-io:2.15.1' api "org.slf4j:slf4j-api:${versions.slf4j}" // character set detection diff --git a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 deleted file mode 100644 index 73709383fd130..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-io-2.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5c3c2db10f6f797430a7f9c696b4d1273768c924 \ No newline at end of file diff --git 
a/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file From b9022c5dcf6fdc8dc538bcfbb8fd7889982c25c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 10:52:01 -0500 Subject: [PATCH 16/81] Bump commons-cli:commons-cli from 1.5.0 to 1.6.0 in /plugins/repository-hdfs (#10996) * Bump commons-cli:commons-cli in /plugins/repository-hdfs Bumps commons-cli:commons-cli from 1.5.0 to 1.6.0. --- updated-dependencies: - dependency-name: commons-cli:commons-cli dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index c0421f0c1d8fe..dbb9216f5d40e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -152,6 +152,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-compress` from 1.24.0 to 1.25.0 ([#11556](https://github.com/opensearch-project/OpenSearch/pull/11556)) - Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) - Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) +- Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index ed1f54888a26f..7736390d58fe1 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -70,7 +70,7 @@ dependencies { api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" - api 'commons-cli:commons-cli:1.5.0' + api 'commons-cli:commons-cli:1.6.0' api "commons-codec:commons-codec:${versions.commonscodec}" api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" diff --git a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 deleted file mode 100644 index 8f9e064eda2d0..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-cli-1.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc98be5d5390230684a092589d70ea76a147925c \ No newline at end of file diff --git 
a/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 new file mode 100644 index 0000000000000..bb94eda6814ea --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-cli-1.6.0.jar.sha1 @@ -0,0 +1 @@ +38166a23afb5bd5520f739b87b3be87f7f0fb96d \ No newline at end of file From 71d7e4daf7dc05c9598d447e4424072e76de79d1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Dec 2023 14:43:25 -0500 Subject: [PATCH 17/81] Bump com.maxmind.geoip2:geoip2 from 4.1.0 to 4.2.0 in /modules/ingest-geoip (#11559) * Bump com.maxmind.geoip2:geoip2 in /modules/ingest-geoip Bumps [com.maxmind.geoip2:geoip2](https://github.com/maxmind/GeoIP2-java) from 4.1.0 to 4.2.0. - [Release notes](https://github.com/maxmind/GeoIP2-java/releases) - [Changelog](https://github.com/maxmind/GeoIP2-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/maxmind/GeoIP2-java/compare/v4.1.0...v4.2.0) --- updated-dependencies: - dependency-name: com.maxmind.geoip2:geoip2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + modules/ingest-geoip/build.gradle | 2 +- modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 | 1 - modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index dbb9216f5d40e..2b70e225e715a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -153,6 +153,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/stale` from 8 to 9 ([#11557](https://github.com/opensearch-project/OpenSearch/pull/11557)) - Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) - Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) +- Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index e126cf37e33a2..45b2ff74a5826 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,7 +39,7 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:4.1.0') + api('com.maxmind.geoip2:geoip2:4.2.0') // geoip2 dependencies: api('com.maxmind.db:maxmind-db:3.0.0') api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") diff --git a/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 deleted file mode 100644 index 0d124299e4cfb..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6b356cc91863409ba3475a148ee11a3a6d6aa4b \ No newline at end of file diff --git 
a/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 new file mode 100644 index 0000000000000..b6bfeeb9da60b --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 @@ -0,0 +1 @@ +78ff932dc13ac41dd1f0fd9e7405a7f4ad815ce0 \ No newline at end of file From 316b60afde91f3cf7d026b6a53670ef63f09d679 Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:30:22 -0800 Subject: [PATCH 18/81] Fix flaky testPostFilterDisablesCountOptimization with concurrent search enabled (#11674) Signed-off-by: Ticheng Lin --- .../search/query/QueryProfilePhaseTests.java | 44 ++++++++++++++----- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 62dcf54e25578..ba1600e6eb651 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -1401,12 +1401,22 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); if (executor != null) { - assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); - assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThan(0L)); + assertThat(avgScore, greaterThan(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThan(0L)); + assertThat(avgScoreCount, greaterThan(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -1436,12 +1446,22 @@ public void testCollapseQuerySearchResults() throws Exception { assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(6L)); if (executor != null) { - assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThanOrEqualTo(6L)); - 
assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThanOrEqualTo(2L)); - assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThanOrEqualTo(6L)); + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThan(0L)); + assertThat(avgScore, greaterThan(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThan(0L)); + assertThat(avgScoreCount, greaterThan(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); From 60b2265d9390f27f80803f16eb4d1c5cdc0f4947 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Thu, 28 Dec 2023 22:11:09 +0800 Subject: [PATCH 19/81] Update supported version for max_shard_size parameter in Shrink API (#11439) * Update supported version for max_shard_size parameter in Shrink API Signed-off-by: Gao Binlong * Modify changelog Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../rest-api-spec/test/indices.shrink/40_max_shard_size.yml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b70e225e715a..79fd7d04eadae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) +- Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml index 32ac11097d3dc..bac2898ccea1c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml @@ -4,8 +4,8 @@ # number_of_shards for the target index. 
- skip: - version: " - 2.99.99" - reason: "only available in 3.0+" + version: " - 2.4.99" + reason: "max_shard_size was introduced in 2.5.0" features: allowed_warnings - do: From 2dec5dc29bcdb39b15cdb2b1508f36d6ebc13188 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jan 2024 17:34:39 -0500 Subject: [PATCH 20/81] Bump org.apache.commons:commons-lang3 from 3.13.0 to 3.14.0 in /plugins/repository-hdfs (#11691) * Bump org.apache.commons:commons-lang3 in /plugins/repository-hdfs Bumps org.apache.commons:commons-lang3 from 3.13.0 to 3.14.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-lang3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 79fd7d04eadae..050c0ad96c75f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -155,6 +155,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.netflix.nebula:nebula-publishing-plugin` from 20.3.0 to 21.0.0 ([#11671](https://github.com/opensearch-project/OpenSearch/pull/11671)) - Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) - Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) +- Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 7736390d58fe1..f04d42a2155d6 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -76,7 +76,7 @@ dependencies { api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.9.0' api 'commons-io:commons-io:2.14.0' - api 'org.apache.commons:commons-lang3:3.13.0' + api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 deleted file mode 100644 index d0c2f2486ee1f..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 new file mode 100644 index 0000000000000..d783e07e40902 
--- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.14.0.jar.sha1 @@ -0,0 +1 @@ +1ed471194b02f2c6cb734a0cd6f6f107c673afae \ No newline at end of file From a37acba3a4946a117f65183add6688a9382afa12 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jan 2024 17:37:40 -0500 Subject: [PATCH 21/81] Bump org.apache.logging.log4j:log4j-core from 2.22.0 to 2.22.1 in /buildSrc/src/testKit/thirdPartyAudit/sample_jars (#11695) * Bump org.apache.logging.log4j:log4j-core Bumps org.apache.logging.log4j:log4j-core from 2.22.0 to 2.22.1. --- updated-dependencies: - dependency-name: org.apache.logging.log4j:log4j-core dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 050c0ad96c75f..a7fee08a7420b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -132,7 +132,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.bouncycastle:bc-fips` from 1.0.2.3 to 1.0.2.4 ([#10297](https://github.com/opensearch-project/OpenSearch/pull/10297)) - Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635)) - Bump `com.squareup.okio:okio` from 3.5.0 to 3.7.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637), [#11632](https://github.com/opensearch-project/OpenSearch/pull/11632)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.22.1 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858), [#11000](https://github.com/opensearch-project/OpenSearch/pull/11000), [#11270](https://github.com/opensearch-project/OpenSearch/pull/11270), [#11695](https://github.com/opensearch-project/OpenSearch/pull/11695)) - Bump `aws-actions/configure-aws-credentials` from 2 to 4 ([#10504](https://github.com/opensearch-project/OpenSearch/pull/10504)) - Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) - Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index f24b61ef0d165..351b42e5bc921 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.22.0" + implementation "org.apache.logging.log4j:log4j-core:2.22.1" } ["0.0.1", "0.0.2"].forEach { v -> From 5ccd134fb9faf5fe70db6b6f6f274359ddcaabcb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Jan 2024 
20:42:29 -0500 Subject: [PATCH 22/81] Bump com.maxmind.db:maxmind-db from 3.0.0 to 3.1.0 in /modules/ingest-geoip (#11693) * Bump com.maxmind.db:maxmind-db in /modules/ingest-geoip Bumps [com.maxmind.db:maxmind-db](https://github.com/maxmind/MaxMind-DB-Reader-java) from 3.0.0 to 3.1.0. - [Release notes](https://github.com/maxmind/MaxMind-DB-Reader-java/releases) - [Changelog](https://github.com/maxmind/MaxMind-DB-Reader-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/maxmind/MaxMind-DB-Reader-java/compare/v3.0.0...v3.1.0) --- updated-dependencies: - dependency-name: com.maxmind.db:maxmind-db dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + modules/ingest-geoip/build.gradle | 2 +- modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 | 1 - modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index a7fee08a7420b..6a1937c807a25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -156,6 +156,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `commons-cli:commons-cli` from 1.5.0 to 1.6.0 ([#10996](https://github.com/opensearch-project/OpenSearch/pull/10996)) - Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) - Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) +- Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 45b2ff74a5826..c0ff155ce1038 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -41,7 +41,7 @@ opensearchplugin { dependencies { api('com.maxmind.geoip2:geoip2:4.2.0') // geoip2 dependencies: - api('com.maxmind.db:maxmind-db:3.0.0') + api('com.maxmind.db:maxmind-db:3.1.0') api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 deleted file mode 100644 index 89b0c4c49b450..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-3.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79dcda62168a77caf595f8fda101baa17fef125d \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 new file mode 100644 index 0000000000000..9db7c7319af0b --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 @@ -0,0 +1 @@ +2008992ab45d61c7b28a18678b5df82272529da3 \ No newline at end of file From 6a01d2f95b7e98c601949a58b92b91e3e4022e17 Mon Sep 17 00:00:00 2001 
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jan 2024 08:07:20 -0500 Subject: [PATCH 23/81] Bump com.avast.gradle:gradle-docker-compose-plugin from 0.17.5 to 0.17.6 (#11692) * Bump com.avast.gradle:gradle-docker-compose-plugin from 0.17.5 to 0.17.6 Bumps [com.avast.gradle:gradle-docker-compose-plugin](https://github.com/avast/gradle-docker-compose-plugin) from 0.17.5 to 0.17.6. - [Release notes](https://github.com/avast/gradle-docker-compose-plugin/releases) - [Commits](https://github.com/avast/gradle-docker-compose-plugin/compare/0.17.5...0.17.6) --- updated-dependencies: - dependency-name: com.avast.gradle:gradle-docker-compose-plugin dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a1937c807a25..579d2695d3125 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -138,7 +138,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) - Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) - Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294)) -- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.5 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163)) +- Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) - Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) - Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) - Bump `reactor-netty-core` from 1.1.12 to 1.1.13 ([#11350](https://github.com/opensearch-project/OpenSearch/pull/11350)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 40ab0801223b1..a42976fef572c 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -115,7 +115,7 @@ dependencies { api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.6' - api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.5' + api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' api 'com.networknt:json-schema-validator:1.0.86' From 63f4f1397f2e47e717f8797eae1e6516360c4052 Mon Sep 17 00:00:00 2001 From: Gaurav Chandani Date: Tue, 2 Jan 2024 23:14:16 +0530 Subject: [PATCH 24/81] Batch Async Fetcher class changes (#8742) * Async Fetcher class changes Signed-off-by: Gaurav Chandani --- .../TransportIndicesShardStoresAction.java | 6 +- .../opensearch/gateway/AsyncShardFetch.java | 140 +++++++++++++----- .../opensearch/gateway/GatewayAllocator.java | 25 +++- ...ransportNodesListGatewayStartedShards.java | 11 
+- .../indices/store/ShardAttributes.java | 59 ++++++++ .../TransportNodesListShardStoreMetadata.java | 10 +- .../gateway/AsyncShardFetchTests.java | 80 ++++++---- .../gateway/PrimaryShardAllocatorTests.java | 6 +- .../gateway/ReplicaShardAllocatorTests.java | 6 +- .../indices/store/ShardAttributesTests.java | 49 ++++++ .../test/gateway/TestGatewayAllocator.java | 12 +- 11 files changed, 319 insertions(+), 85 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/store/ShardAttributes.java create mode 100644 server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 41225bc362235..04166c88a00ad 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -195,7 +195,7 @@ void start() { } else { for (Tuple shard : shards) { InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shard.v1(), shard.v2(), listShardStoresInfo); - fetch.fetchData(nodes, Collections.emptySet()); + fetch.fetchData(nodes, Collections.emptyMap()); } } } @@ -223,7 +223,7 @@ protected synchronized void processAsyncFetch( List failures, long fetchingRound ) { - fetchResponses.add(new Response(shardId, responses, failures)); + fetchResponses.add(new Response(shardAttributesMap.keySet().iterator().next(), responses, failures)); if (expectedOps.countDown()) { finish(); } @@ -312,7 +312,7 @@ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { } @Override - protected void reroute(ShardId shardId, String reason) { + protected void reroute(String shardId, String reason) { // no-op } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java index d0ade4eb25168..50774f7e0cb1c 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java @@ -46,6 +46,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.transport.ReceiveTimeoutTransportException; import java.util.ArrayList; @@ -54,12 +55,11 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import static java.util.Collections.emptySet; -import static java.util.Collections.unmodifiableSet; +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableMap; /** * Allows to asynchronously fetch shard related data from other nodes for allocation, without blocking @@ -69,6 +69,7 @@ * and once the results are back, it makes sure to schedule a reroute to make sure those results will * be taken into account. * + * It comes in two modes, to single fetch a shard or fetch a batch of shards. 
* @opensearch.internal */ public abstract class AsyncShardFetch implements Releasable { @@ -77,18 +78,21 @@ public abstract class AsyncShardFetch implements Rel * An action that lists the relevant shard data that needs to be fetched. */ public interface Lister, NodeResponse extends BaseNodeResponse> { - void list(ShardId shardId, @Nullable String customDataPath, DiscoveryNode[] nodes, ActionListener listener); + void list(Map shardAttributesMap, DiscoveryNode[] nodes, ActionListener listener); + } protected final Logger logger; protected final String type; - protected final ShardId shardId; - protected final String customDataPath; + protected final Map shardAttributesMap; private final Lister, T> action; private final Map> cache = new HashMap<>(); - private final Set nodesToIgnore = new HashSet<>(); private final AtomicLong round = new AtomicLong(); private boolean closed; + private final String reroutingKey; + private final Map> shardToIgnoreNodes = new HashMap<>(); + + private final boolean enableBatchMode; @SuppressWarnings("unchecked") protected AsyncShardFetch( @@ -100,9 +104,36 @@ protected AsyncShardFetch( ) { this.logger = logger; this.type = type; - this.shardId = Objects.requireNonNull(shardId); - this.customDataPath = Objects.requireNonNull(customDataPath); + shardAttributesMap = new HashMap<>(); + shardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath)); + this.action = (Lister, T>) action; + this.reroutingKey = "ShardId=[" + shardId.toString() + "]"; + enableBatchMode = false; + } + + /** + * Added to fetch a batch of shards from nodes + * + * @param logger Logger + * @param type type of action + * @param shardAttributesMap Map of {@link ShardId} to {@link ShardAttributes} to perform fetching on them a + * @param action Transport Action + * @param batchId For the given ShardAttributesMap, we expect them to tie with a single batch id for logging and later identification + */ + @SuppressWarnings("unchecked") + protected AsyncShardFetch( + Logger logger, + String type, + Map shardAttributesMap, + Lister, T> action, + String batchId + ) { + this.logger = logger; + this.type = type; + this.shardAttributesMap = shardAttributesMap; this.action = (Lister, T>) action; + this.reroutingKey = "BatchID=[" + batchId + "]"; + enableBatchMode = true; } @Override @@ -130,11 +161,32 @@ public synchronized int getNumberOfInFlightFetches() { * The ignoreNodes are nodes that are supposed to be ignored for this round, since fetching is async, we need * to keep them around and make sure we add them back when all the responses are fetched and returned. */ - public synchronized FetchResult fetchData(DiscoveryNodes nodes, Set ignoreNodes) { + public synchronized FetchResult fetchData(DiscoveryNodes nodes, Map> ignoreNodes) { if (closed) { - throw new IllegalStateException(shardId + ": can't fetch data on closed async fetch"); + throw new IllegalStateException(reroutingKey + ": can't fetch data on closed async fetch"); } - nodesToIgnore.addAll(ignoreNodes); + + if (enableBatchMode == false) { + // we will do assertions here on ignoreNodes + if (ignoreNodes.size() > 1) { + throw new IllegalStateException( + "Fetching Shard Data, " + reroutingKey + "Can only have atmost one shard" + "for non-batch mode" + ); + } + if (ignoreNodes.size() == 1) { + if (shardAttributesMap.containsKey(ignoreNodes.keySet().iterator().next()) == false) { + throw new IllegalStateException("Shard Id must be same as initialized in AsyncShardFetch. 
Expecting = " + reroutingKey); + } + } + } + + // add the nodes to ignore to the list of nodes to ignore for each shard + for (Map.Entry> ignoreNodesEntry : ignoreNodes.entrySet()) { + Set ignoreNodesSet = shardToIgnoreNodes.getOrDefault(ignoreNodesEntry.getKey(), new HashSet<>()); + ignoreNodesSet.addAll(ignoreNodesEntry.getValue()); + shardToIgnoreNodes.put(ignoreNodesEntry.getKey(), ignoreNodesSet); + } + fillShardCacheWithDataNodes(cache, nodes); List> nodesToFetch = findNodesToFetch(cache); if (nodesToFetch.isEmpty() == false) { @@ -153,7 +205,7 @@ public synchronized FetchResult fetchData(DiscoveryNodes nodes, Set i // if we are still fetching, return null to indicate it if (hasAnyNodeFetching(cache)) { - return new FetchResult<>(shardId, null, emptySet()); + return new FetchResult<>(null, emptyMap()); } else { // nothing to fetch, yay, build the return value Map fetchData = new HashMap<>(); @@ -177,16 +229,27 @@ public synchronized FetchResult fetchData(DiscoveryNodes nodes, Set i } } } - Set allIgnoreNodes = unmodifiableSet(new HashSet<>(nodesToIgnore)); + + Map> allIgnoreNodesMap = unmodifiableMap(new HashMap<>(shardToIgnoreNodes)); // clear the nodes to ignore, we had a successful run in fetching everything we can // we need to try them if another full run is needed - nodesToIgnore.clear(); + shardToIgnoreNodes.clear(); // if at least one node failed, make sure to have a protective reroute // here, just case this round won't find anything, and we need to retry fetching data - if (failedNodes.isEmpty() == false || allIgnoreNodes.isEmpty() == false) { - reroute(shardId, "nodes failed [" + failedNodes.size() + "], ignored [" + allIgnoreNodes.size() + "]"); + + if (failedNodes.isEmpty() == false + || allIgnoreNodesMap.values().stream().anyMatch(ignoreNodeSet -> ignoreNodeSet.isEmpty() == false)) { + reroute( + reroutingKey, + "nodes failed [" + + failedNodes.size() + + "], ignored [" + + allIgnoreNodesMap.values().stream().mapToInt(Set::size).sum() + + "]" + ); } - return new FetchResult<>(shardId, fetchData, allIgnoreNodes); + + return new FetchResult<>(fetchData, allIgnoreNodesMap); } } @@ -199,10 +262,10 @@ public synchronized FetchResult fetchData(DiscoveryNodes nodes, Set i protected synchronized void processAsyncFetch(List responses, List failures, long fetchingRound) { if (closed) { // we are closed, no need to process this async fetch at all - logger.trace("{} ignoring fetched [{}] results, already closed", shardId, type); + logger.trace("{} ignoring fetched [{}] results, already closed", reroutingKey, type); return; } - logger.trace("{} processing fetched [{}] results", shardId, type); + logger.trace("{} processing fetched [{}] results", reroutingKey, type); if (responses != null) { for (T response : responses) { @@ -212,7 +275,7 @@ protected synchronized void processAsyncFetch(List responses, List fetchingRound : "node entries only replaced by newer rounds"; logger.trace( "{} received response for [{}] from node {} for an older fetching round (expected: {} but was: {})", - shardId, + reroutingKey, nodeEntry.getNodeId(), type, nodeEntry.getFetchingRound(), @@ -221,14 +284,14 @@ protected synchronized void processAsyncFetch(List responses, List responses, List nodeEntry = cache.get(failure.nodeId()); if (nodeEntry != null) { if (nodeEntry.getFetchingRound() != fetchingRound) { assert nodeEntry.getFetchingRound() > fetchingRound : "node entries only replaced by newer rounds"; logger.trace( "{} received failure for [{}] from node {} for an older fetching round (expected: {} but 
was: {})", - shardId, + reroutingKey, nodeEntry.getNodeId(), type, nodeEntry.getFetchingRound(), @@ -261,7 +324,7 @@ protected synchronized void processAsyncFetch(List responses, List new ParameterizedMessage( "{}: failed to list shard for {} on node [{}]", - shardId, + reroutingKey, type, failure.nodeId() ), @@ -273,13 +336,13 @@ protected synchronized void processAsyncFetch(List responses, List> shardCache) { */ // visible for testing void asyncFetch(final DiscoveryNode[] nodes, long fetchingRound) { - logger.trace("{} fetching [{}] from {}", shardId, type, nodes); - action.list(shardId, customDataPath, nodes, new ActionListener>() { + logger.trace("{} fetching [{}] from {}", reroutingKey, type, nodes); + action.list(shardAttributesMap, nodes, new ActionListener>() { @Override public void onResponse(BaseNodesResponse response) { processAsyncFetch(response.getNodes(), response.failures(), fetchingRound); @@ -358,14 +421,12 @@ public void onFailure(Exception e) { */ public static class FetchResult { - private final ShardId shardId; private final Map data; - private final Set ignoreNodes; + private final Map> ignoredShardToNodes; - public FetchResult(ShardId shardId, Map data, Set ignoreNodes) { - this.shardId = shardId; + public FetchResult(Map data, Map> ignoreNodes) { this.data = data; - this.ignoreNodes = ignoreNodes; + this.ignoredShardToNodes = ignoreNodes; } /** @@ -389,9 +450,14 @@ public Map getData() { * Process any changes needed to the allocation based on this fetch result. */ public void processAllocation(RoutingAllocation allocation) { - for (String ignoreNode : ignoreNodes) { - allocation.addIgnoreShardForNode(shardId, ignoreNode); + for (Map.Entry> entry : ignoredShardToNodes.entrySet()) { + ShardId shardId = entry.getKey(); + Set ignoreNodes = entry.getValue(); + if (ignoreNodes.isEmpty() == false) { + ignoreNodes.forEach(nodeId -> allocation.addIgnoreShardForNode(shardId, nodeId)); + } } + } } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java index b5a00b1a47523..c8ef9364ebba9 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java @@ -56,6 +56,7 @@ import org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Set; import java.util.Spliterators; @@ -226,7 +227,9 @@ private static void clearCacheForPrimary( AsyncShardFetch fetch, RoutingAllocation allocation ) { - ShardRouting primary = allocation.routingNodes().activePrimary(fetch.shardId); + assert fetch.shardAttributesMap.size() == 1 : "expected only one shard"; + ShardId shardId = fetch.shardAttributesMap.keySet().iterator().next(); + ShardRouting primary = allocation.routingNodes().activePrimary(shardId); if (primary != null) { fetch.clearCacheForNode(primary.currentNodeId()); } @@ -254,15 +257,15 @@ class InternalAsyncFetch extends AsyncShardFetch } @Override - protected void reroute(ShardId shardId, String reason) { - logger.trace("{} scheduling reroute for {}", shardId, reason); + protected void reroute(String reroutingKey, String reason) { + logger.trace("{} scheduling reroute for {}", reroutingKey, reason); assert rerouteService != null; rerouteService.reroute( "async_shard_fetch", Priority.HIGH, ActionListener.wrap( - r -> logger.trace("{} scheduled reroute completed for {}", shardId, reason), - e -> logger.debug(new 
ParameterizedMessage("{} scheduled reroute failed for {}", shardId, reason), e) + r -> logger.trace("{} scheduled reroute completed for {}", reroutingKey, reason), + e -> logger.debug(new ParameterizedMessage("{} scheduled reroute failed for {}", reroutingKey, reason), e) ) ); } @@ -293,7 +296,11 @@ protected AsyncShardFetch.FetchResult shardState = fetch.fetchData( allocation.nodes(), - allocation.getIgnoreNodes(shard.shardId()) + new HashMap<>() { + { + put(shard.shardId(), allocation.getIgnoreNodes(shard.shardId())); + } + } ); if (shardState.hasData()) { @@ -328,7 +335,11 @@ protected AsyncShardFetch.FetchResult shardStores = fetch.fetchData( allocation.nodes(), - allocation.getIgnoreNodes(shard.shardId()) + new HashMap<>() { + { + put(shard.shardId(), allocation.getIgnoreNodes(shard.shardId())); + } + } ); if (shardStores.hasData()) { shardStores.processAllocation(allocation); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index a47893f37a97c..601a5c671d67c 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -62,12 +62,14 @@ import org.opensearch.index.store.Store; import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.List; +import java.util.Map; import java.util.Objects; /** @@ -124,7 +126,14 @@ public TransportNodesListGatewayStartedShards( } @Override - public void list(ShardId shardId, String customDataPath, DiscoveryNode[] nodes, ActionListener listener) { + public void list( + Map shardAttributesMap, + DiscoveryNode[] nodes, + ActionListener listener + ) { + assert shardAttributesMap.size() == 1 : "only one shard should be specified"; + final ShardId shardId = shardAttributesMap.keySet().iterator().next(); + final String customDataPath = shardAttributesMap.get(shardId).getCustomDataPath(); execute(new Request(shardId, customDataPath, nodes), listener); } diff --git a/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java b/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java new file mode 100644 index 0000000000000..4ef4e91f7af8c --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/store/ShardAttributes.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.store; + +import org.opensearch.common.Nullable; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.gateway.AsyncShardFetch; + +import java.io.IOException; + +/** + * This class contains information about the shard that needs to be sent as part of request in Transport Action implementing + * {@link AsyncShardFetch.Lister} to fetch shard information in async manner + * + * @opensearch.internal + */ +public class ShardAttributes implements Writeable { + private final ShardId shardId; + @Nullable + private final String customDataPath; + + public ShardAttributes(ShardId shardId, String customDataPath) { + this.shardId = shardId; + this.customDataPath = customDataPath; + } + + public ShardAttributes(StreamInput in) throws IOException { + shardId = new ShardId(in); + customDataPath = in.readString(); + } + + public ShardId getShardId() { + return shardId; + } + + /** + * Returns the custom data path that is used to look up information for this shard. + * Returns an empty string if no custom data path is used for this index. + * Returns null if custom data path information is not available (due to BWC). + */ + @Nullable + public String getCustomDataPath() { + return customDataPath; + } + + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeString(customDataPath); + } +} diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index 0afeae253ae14..5a3c1038cd5f0 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -73,6 +73,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -125,7 +126,14 @@ public TransportNodesListShardStoreMetadata( } @Override - public void list(ShardId shardId, String customDataPath, DiscoveryNode[] nodes, ActionListener listener) { + public void list( + Map shardAttributes, + DiscoveryNode[] nodes, + ActionListener listener + ) { + assert shardAttributes.size() == 1 : "only one shard should be specified"; + final ShardId shardId = shardAttributes.keySet().iterator().next(); + final String customDataPath = shardAttributes.get(shardId).getCustomDataPath(); execute(new Request(shardId, customDataPath, nodes), listener); } diff --git a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java index 31a27503069d7..4e5e9c71e1fe4 100644 --- a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java +++ b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.store.ShardAttributes; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -46,12 +47,13 @@ import org.junit.Before; import java.util.Collections; 
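For orientation, a minimal sketch of how a caller drives the reworked Lister contract after this change. The lister, shardId, customDataPath, nodes and logger identifiers are placeholders for illustration, not code from this patch; the existing allocators still pass exactly one shard per request, matching the size() == 1 asserts above.

    // Illustrative only: build the single-entry map that the refactored
    // Lister#list(Map<ShardId, ShardAttributes>, DiscoveryNode[], ActionListener) expects.
    Map<ShardId, ShardAttributes> shardAttributesMap = new HashMap<>();
    shardAttributesMap.put(shardId, new ShardAttributes(shardId, customDataPath));
    lister.list(shardAttributesMap, nodes, ActionListener.wrap(
        response -> logger.trace("fetched shard data from {} nodes", response.getNodes().size()),
        e -> logger.warn("async shard fetch failed", e)
    ));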
+import java.util.HashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; -import static java.util.Collections.emptySet; +import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; @@ -84,7 +86,16 @@ public class AsyncShardFetchTests extends OpenSearchTestCase { public void setUp() throws Exception { super.setUp(); this.threadPool = new TestThreadPool(getTestName()); - this.test = new TestFetch(threadPool); + if (randomBoolean()) { + this.test = new TestFetch(threadPool); + } else { + HashMap shardToCustomDataPath = new HashMap<>(); + ShardId shardId0 = new ShardId("index1", "index_uuid1", 0); + ShardId shardId1 = new ShardId("index2", "index_uuid2", 0); + shardToCustomDataPath.put(shardId0, new ShardAttributes(shardId0, "")); + shardToCustomDataPath.put(shardId1, new ShardAttributes(shardId1, "")); + this.test = new TestFetch(threadPool, shardToCustomDataPath); + } } @After @@ -97,7 +108,7 @@ public void testClose() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -107,7 +118,7 @@ public void testClose() throws Exception { assertThat(test.reroute.get(), equalTo(1)); test.close(); try { - test.fetchData(nodes, emptySet()); + test.fetchData(nodes, emptyMap()); fail("fetch data should fail when closed"); } catch (IllegalStateException e) { // all is well @@ -119,7 +130,7 @@ public void testFullCircleSingleNodeSuccess() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -127,7 +138,7 @@ public void testFullCircleSingleNodeSuccess() throws Exception { test.fireSimulationAndWait(node1.getId()); // verify we get back the data node assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -139,7 +150,7 @@ public void testFullCircleSingleNodeFailure() throws Exception { test.addSimulation(node1.getId(), failure1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -147,19 +158,19 @@ public void testFullCircleSingleNodeFailure() throws Exception { test.fireSimulationAndWait(node1.getId()); // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); // on failure, we reset the failure on a 
successive call to fetchData, and try again afterwards test.addSimulation(node1.getId(), response1); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); // 2 reroutes, cause we have a failure that we clear assertThat(test.reroute.get(), equalTo(3)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -170,7 +181,7 @@ public void testIgnoreResponseFromDifferentRound() throws Exception { test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -183,7 +194,7 @@ public void testIgnoreResponseFromDifferentRound() throws Exception { test.fireSimulationAndWait(node1.getId()); // verify we get back the data node assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -195,7 +206,7 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { test.addSimulation(node1.getId(), failure1); // first fetch, no data, still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -212,7 +223,7 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { test.fireSimulationAndWait(node1.getId()); // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); } @@ -223,7 +234,7 @@ public void testTwoNodesOnSetup() throws Exception { test.addSimulation(node2.getId(), response2); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -231,14 +242,14 @@ public void testTwoNodesOnSetup() throws Exception { test.fireSimulationAndWait(node1.getId()); // there is still another on going request, so no data assertThat(test.getNumberOfInFlightFetches(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); // no more ongoing requests, we should fetch the data assertThat(test.reroute.get(), equalTo(2)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); 
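    // Note on the changes in these tests: the only behavioural difference is the
    // ignore-nodes argument to fetchData(), which is now a per-shard map, so the old
    // emptySet() becomes emptyMap(); reroute counts, round tracking and data
    // visibility are asserted exactly as before.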
assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -251,21 +262,21 @@ public void testTwoNodesOnSetupAndFailure() throws Exception { test.addSimulation(node2.getId(), failure2); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); // fire the first response, it should trigger a reroute test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(1)); - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); assertThat(test.reroute.get(), equalTo(2)); // since one of those failed, we should only have one entry - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -276,7 +287,7 @@ public void testTwoNodesAddedInBetween() throws Exception { test.addSimulation(node1.getId(), response1); // no fetched data, 2 requests still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -287,14 +298,14 @@ public void testTwoNodesAddedInBetween() throws Exception { nodes = DiscoveryNodes.builder(nodes).add(node2).build(); test.addSimulation(node2.getId(), response2); // no fetch data, has a new node introduced - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); // since one of those failed, we should only have one entry - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -309,7 +320,7 @@ public void testClearCache() throws Exception { test.clearCacheForNode(node1.getId()); // no fetched data, request still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -317,13 +328,13 @@ public void testClearCache() throws Exception { assertThat(test.reroute.get(), equalTo(1)); // verify we get back right data from node - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); // second fetch gets same data - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); 
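    // The remainder of testClearCache clears the cached response for node1 and
    // verifies that the next fetchData() call issues a fresh async fetch (the reroute
    // count increases) before the updated response becomes visible.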
assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); @@ -334,14 +345,14 @@ public void testClearCache() throws Exception { test.addSimulation(node1.getId(), response1_2); // no fetched data, new request on going - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); // verify we get new data back - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); @@ -352,7 +363,7 @@ public void testConcurrentRequestAndClearCache() throws Exception { test.addSimulation(node1.getId(), response1); // no fetched data, request still on going - AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptySet()); + AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); @@ -366,14 +377,14 @@ public void testConcurrentRequestAndClearCache() throws Exception { test.addSimulation(node1.getId(), response1_2); // verify still no fetched data, request still on going - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); // verify we get new data back - fetchData = test.fetchData(nodes, emptySet()); + fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); @@ -403,6 +414,11 @@ static class Entry { this.threadPool = threadPool; } + TestFetch(ThreadPool threadPool, Map shardAttributesMap) { + super(LogManager.getLogger(TestFetch.class), "test", shardAttributesMap, null, "test-batch"); + this.threadPool = threadPool; + } + public void addSimulation(String nodeId, Response response) { simulations.put(nodeId, new Entry(response, null)); } @@ -418,7 +434,7 @@ public void fireSimulationAndWait(String nodeId) throws InterruptedException { } @Override - protected void reroute(ShardId shardId, String reason) { + protected void reroute(String shardId, String reason) { reroute.incrementAndGet(); } diff --git a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java index c31ce60cb96a1..dceda6433575c 100644 --- a/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/opensearch/gateway/PrimaryShardAllocatorTests.java @@ -857,7 +857,11 @@ protected AsyncShardFetch.FetchResult(shardId, data, Collections.emptySet()); + return new AsyncShardFetch.FetchResult<>(data, new HashMap<>() { + { + put(shardId, Collections.emptySet()); + } + }); } } } diff --git a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java index 3eeebd8cab6e4..5833d9c4f187f 100644 --- a/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java +++ 
b/server/src/test/java/org/opensearch/gateway/ReplicaShardAllocatorTests.java @@ -728,7 +728,11 @@ protected AsyncShardFetch.FetchResult(shardId, tData, Collections.emptySet()); + return new AsyncShardFetch.FetchResult<>(tData, new HashMap<>() { + { + put(shardId, Collections.emptySet()); + } + }); } @Override diff --git a/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java b/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java new file mode 100644 index 0000000000000..7fa95fefe72fd --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/store/ShardAttributesTests.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.store; + +import org.opensearch.core.common.io.stream.DataOutputStreamOutput; +import org.opensearch.core.common.io.stream.InputStreamStreamInput; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +public class ShardAttributesTests extends OpenSearchTestCase { + + Index index = new Index("index", "test-uid"); + ShardId shardId = new ShardId(index, 0); + String customDataPath = "/path/to/data"; + + public void testShardAttributesConstructor() { + ShardAttributes attributes = new ShardAttributes(shardId, customDataPath); + assertEquals(attributes.getShardId(), shardId); + assertEquals(attributes.getCustomDataPath(), customDataPath); + } + + public void testSerialization() throws IOException { + ShardAttributes attributes1 = new ShardAttributes(shardId, customDataPath); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + StreamOutput output = new DataOutputStreamOutput(new DataOutputStream(bytes)); + attributes1.writeTo(output); + output.close(); + StreamInput input = new InputStreamStreamInput(new ByteArrayInputStream(bytes.toByteArray())); + ShardAttributes attributes2 = new ShardAttributes(input); + input.close(); + assertEquals(attributes1.getShardId(), attributes2.getShardId()); + assertEquals(attributes1.getCustomDataPath(), attributes2.getCustomDataPath()); + } + +} diff --git a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java index 7462062a0cd46..f123b926f5bad 100644 --- a/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/opensearch/test/gateway/TestGatewayAllocator.java @@ -98,7 +98,11 @@ protected AsyncShardFetch.FetchResult fetchData(ShardR ) ); - return new AsyncShardFetch.FetchResult<>(shardId, foundShards, ignoreNodes); + return new AsyncShardFetch.FetchResult<>(foundShards, new HashMap<>() { + { + put(shardId, ignoreNodes); + } + }); } }; @@ -111,7 +115,11 @@ private ReplicationCheckpoint getReplicationCheckpoint(ShardId shardId, String n protected AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation) { // for now, just pretend no node has data final ShardId shardId = shard.shardId(); - return new 
AsyncShardFetch.FetchResult<>(shardId, Collections.emptyMap(), allocation.getIgnoreNodes(shardId)); + return new AsyncShardFetch.FetchResult<>(Collections.emptyMap(), new HashMap<>() { + { + put(shardId, allocation.getIgnoreNodes(shardId)); + } + }); } @Override From c52d4a351c0b530180d03ea3f2fcb914d6dc1776 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 2 Jan 2024 18:15:45 -0500 Subject: [PATCH 25/81] Fix issue when calling Delete PIT endpoint and no PITs exist (#11711) Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../opensearch/action/search/PitService.java | 1 + .../search/TransportDeletePitActionTests.java | 42 +++++++++++++++++++ 3 files changed, 44 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 579d2695d3125..a167127a7d795 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -204,6 +204,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) - Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) +- Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) ### Security diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index ed12938883e48..b6480ce63f827 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -71,6 +71,7 @@ public void deletePitContexts( ) { if (nodeToContextsMap.size() == 0) { listener.onResponse(new DeletePitResponse(Collections.emptyList())); + return; } final Set clusters = nodeToContextsMap.values() .stream() diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 8d3cdc070c695..a1e3a2b03caf7 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -21,6 +21,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.tasks.TaskId; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -33,6 +34,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; import org.junit.Before; import java.util.ArrayList; @@ -262,6 +264,46 @@ public void getAllPits(ActionListener getAllPitsListener } } + public void testDeleteAllPITSuccessWhenNoPITsExist() throws InterruptedException, ExecutionException { + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT)) 
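    // Background for the PitService change above: when nodeToContextsMap is empty, the
    // method now returns right after completing the listener with an empty
    // DeletePitResponse rather than continuing into the per-node delete dispatch, so a
    // delete of "_all" PITs succeeds with an empty result when no PITs exist. The test
    // below covers exactly that path.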
{ + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + TransportService mockTransportService = mock(TransportService.class); + PitService pitService = new PitService(clusterServiceMock, mock(SearchTransportService.class), mockTransportService, client) { + @Override + public void getAllPits(ActionListener getAllPitsListener) { + List list = new ArrayList<>(); + GetAllPitNodeResponse getAllPitNodeResponse = new GetAllPitNodeResponse(cluster1Transport.getLocalDiscoNode(), list); + List nodeList = new ArrayList(); + nodeList.add(getAllPitNodeResponse); + getAllPitsListener.onResponse(new GetAllPitNodesResponse(new ClusterName("cn"), nodeList, new ArrayList())); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + mockTransportService, + actionFilters, + namedWriteableRegistry, + pitService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DeletePitResponse deletePitResponse) { + assertEquals(RestStatus.OK, deletePitResponse.status()); + assertEquals(0, deletePitResponse.getDeletePitResults().size()); + } + + @Override + public void onFailure(Exception e) { + fail("Should not receive Exception"); + } + }; + action.execute(task, deletePITRequest, listener); + } + } + public void testDeletePitWhenNodeIsDown() throws InterruptedException, ExecutionException { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); From 7b1c2c78c16d566da93001d0f6a8224b2de9e8d0 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Tue, 2 Jan 2024 16:04:40 -0800 Subject: [PATCH 26/81] Implementation for match_only_text field (#11039) * Implementation for match_only_text field Signed-off-by: Rishabh Maurya * Fix build failures Signed-off-by: Rishabh Maurya * Fix bugs Signed-off-by: Rishabh Maurya * Added mapper tests, stil failing on prefix and phrase tests Signed-off-by: Rishabh Maurya * Disable index prefix and phrase mapper Signed-off-by: Rishabh Maurya * Added unit tests for phrase and multiphrase query validation Signed-off-by: Rishabh Maurya * Add unit tests for prefix and prefix phrase queries Signed-off-by: Rishabh Maurya * Add a test to cover 3 word with synonym match phrase prefix query Signed-off-by: Rishabh Maurya * Add unit test for SourceFieldMatchQuery Signed-off-by: Rishabh Maurya * Added test for _source disabled case Signed-off-by: Rishabh Maurya * Add unit test for missing field Signed-off-by: Rishabh Maurya * more validation tests and changelog update Signed-off-by: Rishabh Maurya * Added integration tests for match_only_text replicating text field integ tests Signed-off-by: Rishabh Maurya * Added skip section in integ test to fix mixed cluster failures Signed-off-by: Rishabh Maurya * remove unused import Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * fix integ tests Signed-off-by: Rishabh Maurya * Fix flaky test due to random indexwriter Signed-off-by: Rishabh Maurya * pr comment: header modification Signed-off-by: Rishabh Maurya * Address PR comments Signed-off-by: Rishabh Maurya * addded change to the right section of CHANGELOG Signed-off-by: Rishabh Maurya * overriding the textFieldType before every test Signed-off-by: Rishabh Maurya * rename @Before method Signed-off-by: Rishabh Maurya * update changelog description Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- CHANGELOG.md | 1 + .../11_match_field_match_only_text.yml | 70 +++ 
.../20_ngram_search_field_match_only_text.yml | 144 ++++++ ...ram_highligthing_field_match_only_text.yml | 137 ++++++ .../40_query_string_field_match_only_text.yml | 59 +++ ...default_analyzer_field_match_only_text.yml | 42 ++ ...es_with_synonyms_field_match_only_text.yml | 348 ++++++++++++++ ...60_synonym_graph_field_match_only_text.yml | 209 ++++++++ .../70_intervals_field_match_only_text.yml | 67 +++ .../20_phrase_field_match_only_text.yml | 238 +++++++++ .../20_highlighting_field_match_only_text.yml | 201 ++++++++ .../20_query_string_field_match_only_text.yml | 53 +++ .../30_sig_terms_field_match_only_text.yml | 76 +++ .../90_sig_text_field_match_only_text.yml | 155 ++++++ .../20_highlighting_field_match_only_text.yml | 137 ++++++ .../160_exists_query_match_only_text.yml | 119 +++++ ...00_phrase_search_field_match_only_text.yml | 67 +++ ...atch_bool_prefix_field_match_only_text.yml | 282 +++++++++++ ...disallow_queries_field_match_only_text.yml | 141 ++++++ .../10_basic_field_match_only_field.yml | 92 ++++ .../index/mapper/MappedFieldType.java | 13 + .../mapper/MatchOnlyTextFieldMapper.java | 312 ++++++++++++ .../index/mapper/TextFieldMapper.java | 35 +- .../index/query/SourceFieldMatchQuery.java | 160 +++++++ .../opensearch/index/search/MatchQuery.java | 10 +- .../index/search/MultiMatchQuery.java | 4 +- .../org/opensearch/indices/IndicesModule.java | 2 + .../MatchOnlyTextFieldAnalyzerModeTests.java | 16 + .../mapper/MatchOnlyTextFieldMapperTests.java | 450 ++++++++++++++++++ .../mapper/MatchOnlyTextFieldTypeTests.java | 31 ++ .../mapper/TextFieldAnalyzerModeTests.java | 22 +- .../index/mapper/TextFieldMapperTests.java | 222 ++++----- .../index/mapper/TextFieldTypeTests.java | 32 +- .../query/SourceFieldMatchQueryTests.java | 173 +++++++ .../index/mapper/MapperServiceTestCase.java | 4 +- .../aggregations/AggregatorTestCase.java | 4 +- 36 files changed, 3959 insertions(+), 169 deletions(-) create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml create mode 100644 modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml create mode 100644 modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml create mode 100644 
rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml create mode 100644 server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java create mode 100644 server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java create mode 100644 server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java create mode 100644 server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a167127a7d795..026606ff57d65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Create separate transport action for render search template action ([#11170](https://github.com/opensearch-project/OpenSearch/pull/11170)) - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) +- Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml new file mode 100644 index 0000000000000..40ff2c2f4cdbe --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml @@ -0,0 +1,70 @@ +# integration tests for queries with specific analysis chains + +"match query with stacked stems": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + # Tests the match query stemmed tokens are "stacked" on top of the unstemmed + # versions in the same position. 
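# Background on the field under test: match_only_text analyzes like text but trades
# away positional and frequency information to reduce storage (see the CHANGELOG entry
# in this patch). Term and match queries behave as for a text field, while phrase and
# phrase_prefix queries are verified against _source (the SourceFieldMatchQuery added
# here), and interval queries that need positions are rejected, as the tests in
# 70_intervals_field_match_only_text.yml expect.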
+ - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + analysis: + analyzer: + index: + tokenizer: standard + filter: [lowercase] + search: + rest_total_hits_as_int: true + tokenizer: standard + filter: [lowercase, keyword_repeat, porter_stem, unique_stem] + filter: + unique_stem: + type: unique + only_on_same_position: true + mappings: + properties: + text: + type: match_only_text + analyzer: index + search_analyzer: search + + - do: + index: + index: test + id: 1 + body: { "text": "the fox runs across the street" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + - match: {hits.total: 1} + + - do: + index: + index: test + id: 2 + body: { "text": "run fox run" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + - match: {hits.total: 2} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml new file mode 100644 index 0000000000000..95b648dee47c8 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml @@ -0,0 +1,144 @@ +"ngram search": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + analysis: + analyzer: + my_analyzer: + tokenizer: standard + filter: [my_ngram] + filter: + my_ngram: + type: ngram + min: 2, + max: 2 + mappings: + properties: + text: + type: match_only_text + analyzer: my_analyzer + + - do: + index: + index: test + id: 1 + body: { "text": "foo bar baz" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: foa + - match: {hits.total: 1} + +--- +"testNGramCopyField": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + max_ngram_diff: 9 + analysis: + analyzer: + my_ngram_analyzer: + tokenizer: my_ngram_tokenizer + tokenizer: + my_ngram_tokenizer: + type: ngram + min: 1, + max: 10 + token_chars: [] + mappings: + properties: + origin: + type: match_only_text + copy_to: meta + meta: + type: match_only_text + analyzer: my_ngram_analyzer + + - do: + index: + index: test + id: 1 + body: { "origin": "C.A1234.5678" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: 1234 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: 1234.56 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: A1234 + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + term: + meta: + value: a1234 + - match: {hits.total: 0} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: A1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + meta: + query: a1234 + analyzer: my_ngram_analyzer + - match: {hits.total: 1} diff 
--git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml new file mode 100644 index 0000000000000..597f55679a2c6 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml @@ -0,0 +1,137 @@ +"ngram highlighting": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.max_ngram_diff: 19 + analysis: + tokenizer: + my_ngramt: + type: ngram + min_gram: 1 + max_gram: 20 + token_chars: letter,digit + filter: + my_ngram: + type: ngram + min_gram: 1 + max_gram: 20 + analyzer: + name2_index_analyzer: + tokenizer: whitespace + filter: [my_ngram] + name_index_analyzer: + tokenizer: my_ngramt + name_search_analyzer: + tokenizer: whitespace + mappings: + properties: + name: + type: match_only_text + term_vector: with_positions_offsets + analyzer: name_index_analyzer + search_analyzer: name_search_analyzer + name2: + type: match_only_text + term_vector: with_positions_offsets + analyzer: name2_index_analyzer + search_analyzer: name_search_analyzer + + - do: + index: + index: test + id: 1 + refresh: true + body: + name: logicacmg ehemals avinci - the know how company + name2: logicacmg ehemals avinci - the know how company + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica m + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica ma + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name: + query: logica + highlight: + fields: + - name: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica m + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica ma + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + name2: + query: logica + highlight: + fields: + - name2: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.name2.0: "logicacmg ehemals avinci - the know how company"} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml new file mode 100644 index 0000000000000..ddebb1d76acbc --- /dev/null +++ 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml @@ -0,0 +1,59 @@ +--- +"Test query string with snowball": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + mappings: + properties: + field: + type: match_only_text + number: + type: integer + + - do: + index: + index: test + id: 1 + body: { field: foo bar} + + - do: + indices.refresh: + index: [test] + + - do: + indices.validate_query: + index: test + q: field:bars + analyzer: snowball + + - is_true: valid + + - do: + search: + rest_total_hits_as_int: true + index: test + q: field:bars + analyzer: snowball + + - match: {hits.total: 1} + + - do: + explain: + index: test + id: 1 + q: field:bars + analyzer: snowball + + - is_true: matched + + - do: + count: + index: test + q: field:bars + analyzer: snowball + + - match: {count : 1} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml new file mode 100644 index 0000000000000..97f3fb65e94a2 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml @@ -0,0 +1,42 @@ +--- +"Test default search analyzer is applied": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + index.analysis.analyzer.default.type: simple + index.analysis.analyzer.default_search.type: german + mappings: + properties: + body: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + body: Ich lese die Bücher + + - do: + indices.refresh: + index: [ test ] + + - do: + search: + index: test + q: "body:Bücher" + + - match: { hits.total.value: 0 } + + - do: + search: + index: test + q: "body:Bücher" + analyzer: simple + + - match: { hits.total.value: 1 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml new file mode 100644 index 0000000000000..0c263a47a38e6 --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml @@ -0,0 +1,348 @@ +--- +"Test common terms query with stacked tokens": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + features: "allowed_warnings" + + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + syns: + type: synonym + synonyms: [ "quick,fast" ] + analyzer: + syns: + tokenizer: standard + filter: [ "syns" ] + mappings: + properties: + field1: + type: match_only_text + analyzer: syns + field2: + type: match_only_text + analyzer: syns + + - do: + index: + index: test + id: 3 + body: + field1: quick lazy huge brown pidgin + field2: the quick lazy huge brown fox jumps over the tree + + - do: + index: + index: test + id: 1 + body: + field1: the quick brown fox + + - do: + index: + index: test + id: 2 + body: + field1: the quick lazy huge brown fox jumps over the tree + refresh: true + + - do: + allowed_warnings: + - 
'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + low_freq_operator: or + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + low_freq_operator: and + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast brown + cutoff_frequency: 3 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast huge fox + minimum_should_match: + low_freq: 3 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + minimum_should_match: + high_freq: 5 + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "1" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + minimum_should_match: + high_freq: 6 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the fast lazy fox brown + cutoff_frequency: 1 + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [common] used, replaced by [[match] query which can efficiently skip blocks of documents if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + common: + field1: + query: the quick brown + cutoff_frequency: 3 + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the 
[match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + operator: and + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + operator: or + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.2._id: "3" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + match: + field1: + query: the fast brown + cutoff_frequency: 3 + minimum_should_match: 3 + - match: { hits.total: 2 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.1._id: "2" } + + - do: + allowed_warnings: + - 'Deprecated field [cutoff_frequency] used, replaced by [you can omit this option, the [multi_match] query can skip block of documents efficiently if the total number of hits is not tracked]' + search: + rest_total_hits_as_int: true + body: + query: + multi_match: + query: the fast brown + fields: [ "field1", "field2" ] + cutoff_frequency: 3 + operator: and + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.2._id: "2" } + +--- +"Test match query with synonyms - see #3881 for extensive description of the issue": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + analysis: + filter: + synonym: + type: synonym + synonyms: [ "quick,fast" ] + analyzer: + index: + type: custom + tokenizer: standard + filter: lowercase + search: + rest_total_hits_as_int: true + type: custom + tokenizer: standard + filter: [ lowercase, synonym ] + mappings: + properties: + text: + type: match_only_text + analyzer: index + search_analyzer: search + + - do: + index: + index: test + id: 1 + body: + text: quick brown fox + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick + operator: and + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick brown + operator: and + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fast + operator: and + - match: { hits.total: 1 } + + - do: + index: + index: test + id: 2 + body: + text: fast brown fox + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick + operator: and + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: quick brown + operator: and + - match: { hits.total: 2 } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml new file mode 100644 index 0000000000000..91a8b1509517e --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml @@ -0,0 +1,209 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + index: + number_of_shards: 1 # keep scoring stable + analysis: + filter: + syns: + type: synonym + synonyms: [ "wtf, what the fudge", "foo, bar baz" ] + graph_syns: + type: synonym_graph + synonyms: [ "wtf, what the fudge", "foo, bar baz" ] + analyzer: + lower_syns: + type: custom + tokenizer: standard + filter: [ lowercase, syns ] + lower_graph_syns: + type: custom + tokenizer: standard + filter: [ lowercase, graph_syns ] + mappings: + properties: + field: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + text: say wtf happened foo + - do: + index: + index: test + id: 2 + body: + text: bar baz what the fudge man + + - do: + index: + index: test + id: 3 + body: + text: wtf + + - do: + index: + index: test + id: 4 + body: + text: what is the name for fudge + + - do: + index: + index: test + id: 5 + body: + text: bar two three + + - do: + index: + index: test + id: 6 + body: + text: bar baz two three + refresh: true + +--- +"simple multiterm phrase": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase: + text: + query: foo two three + analyzer: lower_syns + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "5" } # incorrect match because we're not using graph synonyms + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase: + text: + query: foo two three + analyzer: lower_graph_syns + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "6" } # correct match because we're using graph synonyms + +--- +"simple multiterm and": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: say what the fudge + analyzer: lower_syns + operator: and + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } # non-graph synonyms coincidentally give us the correct answer here + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: say what the fudge + analyzer: lower_graph_syns + operator: and + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + +--- +"minimum should match": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: three what the fudge foo + operator: or + analyzer: lower_graph_syns + auto_generate_synonyms_phrase_query: false + - match: { hits.total: 6 } + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: three what the fudge foo + operator: or + analyzer: lower_graph_syns + minimum_should_match: 80% + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "6" } + - match: { hits.hits.2._id: "1" } + +--- +"multiterm synonyms phrase": + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: wtf + operator: and + analyzer: lower_graph_syns + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.2._id: "1" } + +--- +"phrase prefix": + - do: + index: + index: test + id: 7 + body: + text: "WTFD!" 
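# The phrase_prefix case below depends on the graph-aware analyzer: synonym_graph keeps
# multi-token expansions such as "wtf" <-> "what the fudge" positionally aligned, so the
# trailing prefix term can match both the short and the expanded form; the earlier
# sections show that the flat synonym filter gets the equivalent phrase match wrong.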
+ + - do: + index: + index: test + id: 8 + body: + text: "Weird Al's WHAT THE FUDGESICLE" + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match_phrase_prefix: + text: + query: wtf + analyzer: lower_graph_syns + - match: { hits.total: 5 } + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.1._id: "7" } + - match: { hits.hits.2._id: "1" } + - match: { hits.hits.3._id: "8" } + - match: { hits.hits.4._id: "2" } diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml new file mode 100644 index 0000000000000..9792c9d2695ea --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml @@ -0,0 +1,67 @@ +# integration tests for intervals queries using analyzers +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + analyzer: standard + text_en: + type: match_only_text + analyzer: english + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "4"}}' + - '{"text" : "Outside it is cold and wet and raining cats and dogs", + "text_en" : "Outside it is cold and wet and raining cats and dogs"}' + +--- +"Test use_field": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + catch: bad_request + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cats + - match: + query: dog + max_gaps: 1 + - match: { status: 400 } + - match: { error.type: "search_phase_execution_exception"} + - match: { error.reason: "all shards failed"} + - do: + catch: bad_request + search: + index: test + body: + query: + intervals: + text: + all_of: + intervals: + - match: + query: cats + - match: + query: dog + use_field: text_en + max_gaps: 1 + - match: { status: 400 } + - match: { error.type: "search_phase_execution_exception"} + - match: { error.reason: "all shards failed"} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml new file mode 100644 index 0000000000000..aff2b3f11101c --- /dev/null +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml @@ -0,0 +1,238 @@ +# Integration tests for the phrase suggester with a few analyzers + +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + analysis: + analyzer: + body: + tokenizer: standard + filter: [lowercase] + bigram: + tokenizer: standard + filter: [lowercase, bigram] + ngram: + tokenizer: standard + filter: [lowercase, ngram] + reverse: + tokenizer: standard + filter: [lowercase, reverse] + filter: + bigram: + type: shingle + output_unigrams: false + min_shingle_size: 2 + max_shingle_size: 2 + ngram: + type: shingle + output_unigrams: true + min_shingle_size: 2 + max_shingle_size: 2 + mappings: + properties: + body: + type: match_only_text + analyzer: body + fields: + bigram: + type: match_only_text + 
analyzer: bigram + ngram: + type: match_only_text + analyzer: ngram + reverse: + type: match_only_text + analyzer: reverse + + - do: + bulk: + index: test + refresh: true + body: | + { "index": {} } + { "body": "Xorr the God-Jewel" } + { "index": {} } + { "body": "Xorn" } + { "index": {} } + { "body": "Arthur, King of the Britons" } + { "index": {} } + { "body": "Sir Lancelot the Brave" } + { "index": {} } + { "body": "Patsy, Arthur's Servant" } + { "index": {} } + { "body": "Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot" } + { "index": {} } + { "body": "Sir Bedevere the Wise" } + { "index": {} } + { "body": "Sir Galahad the Pure" } + { "index": {} } + { "body": "Miss Islington, the Witch" } + { "index": {} } + { "body": "Zoot" } + { "index": {} } + { "body": "Leader of Robin's Minstrels" } + { "index": {} } + { "body": "Old Crone" } + { "index": {} } + { "body": "Frank, the Historian" } + { "index": {} } + { "body": "Frank's Wife" } + { "index": {} } + { "body": "Dr. Piglet" } + { "index": {} } + { "body": "Dr. Winston" } + { "index": {} } + { "body": "Sir Robin (Stand-in)" } + { "index": {} } + { "body": "Knight Who Says Ni" } + { "index": {} } + { "body": "Police sergeant who stops the film" } + +--- +"sorts by score": + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.ngram + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body.ngram + min_word_length: 1 + suggest_mode: always + + - match: {suggest.test.0.options.0.text: xorr the god jewel} + - match: {suggest.test.0.options.1.text: xorn the god jewel} + +--- +"breaks ties by sorting terms": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + # This runs the suggester without bigrams so we can be sure of the sort order + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body + analyzer: body + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body + min_word_length: 1 + suggest_mode: always + + # The scores are identical but xorn comes first because it sorts first + - match: {suggest.test.0.options.0.text: xorn the god jewel} + - match: {suggest.test.0.options.1.text: xorr the god jewel} + - match: {suggest.test.0.options.0.score: $body.suggest.test.0.options.0.score} + +--- +"fails when asked to run on a field without unigrams": + - do: + catch: /since it doesn't emit unigrams/ + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + + - do: + catch: /since it doesn't emit unigrams/ + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + analyzer: bigram + +--- +"doesn't fail when asked to run on a field without unigrams when force_unigrams=false": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + force_unigrams: false + + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: xor the got-jewel + test: + phrase: + field: body.bigram + analyzer: bigram + force_unigrams: false + +--- +"reverse suggestions": + - skip: + version: " - 2.99.99" + reason: 
"match_only_text was added in 3.0" + - do: + search: + rest_total_hits_as_int: true + size: 0 + index: test + body: + suggest: + text: Artur, Ging of the Britons + test: + phrase: + field: body.ngram + force_unigrams: true + max_errors: 0.5 + direct_generator: + - field: body.reverse + min_word_length: 1 + suggest_mode: always + pre_filter: reverse + post_filter: reverse + + - match: {suggest.test.0.options.0.text: arthur king of the britons} diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml new file mode 100644 index 0000000000000..3cb8e09c70aed --- /dev/null +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml @@ -0,0 +1,201 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + text_field: + type: match_only_text + analyzer: simple + + - do: + index: + index: test + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + text_field: "quick brown fox jump lazy dog" + + - do: + indices.refresh: {} + +--- +"phrase query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- +"bool prefix query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fo" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query 1 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fo" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: null } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 2 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + 
a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 3 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 4 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy d" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: ["quick brown fox jump lazy dog"] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml new file mode 100644 index 0000000000000..085c5633ac72b --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml @@ -0,0 +1,53 @@ +--- +"validate_query with query_string parameters": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + mappings: + properties: + field: + type: match_only_text + number: + type: integer + + - do: + indices.validate_query: + index: test + q: bar + df: field + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: field:foo field:xyz + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: field:foo field:xyz + default_operator: AND + + - is_true: valid + + - do: + 
indices.validate_query: + index: test + q: field:BA* + + - is_true: valid + + - do: + indices.validate_query: + index: test + q: number:foo + lenient: true + + - is_true: valid diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml new file mode 100644 index 0000000000000..7a96536a2e261 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml @@ -0,0 +1,76 @@ +--- +"Default index": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: true + class: + type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_terms": {"significant_terms": {"field": "text"}}}}}} + + - match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"} + - match: {aggregations.class.buckets.1.sig_terms.buckets.0.key: "good"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml new file mode 100644 index 0000000000000..bc41f157dfdc4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml @@ -0,0 +1,155 @@ +--- +"Default index": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: false + class: + type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": 
"class"},"aggs": {"sig_text": {"significant_text": {"field": "text"}}}}}} + + - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} + - match: {aggregations.class.buckets.1.sig_text.buckets.0.key: "good"} + +--- +"Dedup noise": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: goodbad + body: + settings: + number_of_shards: "1" + mappings: + properties: + text: + type: match_only_text + fielddata: false + class: + type: keyword + + - do: + index: + index: goodbad + id: 1 + body: { text: "good noisewords1 g1 g2 g3 g4 g5 g6", class: "good" } + - do: + index: + index: goodbad + id: 2 + body: { text: "good noisewords2 g1 g2 g3 g4 g5 g6", class: "good" } + - do: + index: + index: goodbad + id: 3 + body: { text: "bad noisewords3 b1 b2 b3 b4 b5 b6", class: "bad" } + - do: + index: + index: goodbad + id: 4 + body: { text: "bad noisewords4 b1 b2 b3 b4 b5 b6", class: "bad" } + - do: + index: + index: goodbad + id: 5 + body: { text: "good bad noisewords5 gb1 gb2 gb3 gb4 gb5 gb6", class: "good" } + - do: + index: + index: goodbad + id: 6 + body: { text: "good bad noisewords6 gb1 gb2 gb3 gb4 gb5 gb6", class: "bad" } + - do: + index: + index: goodbad + id: 7 + body: { text: "bad noisewords7 b1 b2 b3 b4 b5 b6", class: "bad" } + + + + - do: + indices.refresh: + index: [goodbad] + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + + - match: {hits.total: 7} + + - do: + search: + rest_total_hits_as_int: true + index: goodbad + body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text", "filter_duplicate_text": true}}}}}} + + - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} + - length: { aggregations.class.buckets.0.sig_text.buckets: 1 } + - match: {aggregations.class.buckets.1.sig_text.buckets.0.key: "good"} + - length: { aggregations.class.buckets.1.sig_text.buckets: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml new file mode 100644 index 0000000000000..7100d620bf19e --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml @@ -0,0 +1,137 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + - do: + indices.create: + index: test + body: + mappings: + _source: + excludes: ["nested.stored_only"] + properties: + nested: + type: nested + properties: + field: + type: text + fields: + vectors: + type: text + term_vector: "with_positions_offsets" + postings: + type: text + index_options: "offsets" + stored: + type: match_only_text + store: true + stored_only: + type: match_only_text + store: true + - do: + index: + index: test + id: 1 + refresh: true + body: + nested: + field : "The quick brown fox is brown." + stored : "The quick brown fox is brown." + stored_only : "The quick brown fox is brown." 
+ +--- +"Unified highlighter": + - do: + search: + index: test + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: [ "nested.field", "nested.field.vectors", "nested.field.postings" ] + inner_hits: + highlight: + type: "unified" + fields: + nested.field: {} + nested.field.vectors: {} + nested.field.postings: {} + + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field.0: "The quick brown fox is brown." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field\.vectors.0: "The quick brown fox is brown." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field\.postings.0: "The quick brown fox is brown." } + +--- +"Unified highlighter with stored fields": + - do: + search: + index: test + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: [ "nested.stored", "nested.stored_only" ] + inner_hits: + highlight: + type: "unified" + fields: + nested.stored: {} + nested.stored_only: {} + + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored.0: "The quick brown fox is brown." } + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored_only.0: "The quick brown fox is brown." } + +--- +"Unified highlighter with stored fields and disabled source": + - do: + indices.create: + index: disabled_source + body: + mappings: + _source: + enabled: false + properties: + nested: + type: nested + properties: + field: + type: match_only_text + stored_only: + type: match_only_text + store: true + - do: + index: + index: disabled_source + id: 1 + refresh: true + body: + nested: + field: "The quick brown fox is brown." + stored_only: "The quick brown fox is brown." 
+ + - do: + search: + index: disabled_source + body: + query: + nested: + path: "nested" + query: + multi_match: + query: "quick brown fox" + fields: ["nested.field", "nested.stored_only"] + inner_hits: + highlight: + type: "unified" + fields: + nested.field: {} + nested.stored_only: {} + + - is_false: hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.field + - match: { hits.hits.0.inner_hits.nested.hits.hits.0.highlight.nested\.stored_only.0: "The quick brown fox is brown."} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml new file mode 100644 index 0000000000000..03626236604a1 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml @@ -0,0 +1,119 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + features: ["headers"] + + - do: + indices.create: + index: test + body: + mappings: + dynamic: false + properties: + match_only_text: + type: match_only_text + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 1 + body: + match_only_text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 2 + body: + match_only_text: "foo bar" + + - do: + headers: + Content-Type: application/json + index: + index: "test" + id: 3 + routing: "route_me" + body: + match_only_text: "foo bar" + + - do: + index: + index: "test" + id: 4 + body: {} + + - do: + indices.create: + index: test-unmapped + body: + mappings: + dynamic: false + properties: + unrelated: + type: keyword + + - do: + index: + index: "test-unmapped" + id: 1 + body: + unrelated: "foo" + + - do: + indices.create: + index: test-empty + body: + mappings: + dynamic: false + properties: + match_only_text: + type: match_only_text + + - do: + indices.refresh: + index: [test, test-unmapped, test-empty] + +--- +"Test exists query on mapped match_only_text field": + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 3} + +--- +"Test exists query on unmapped match_only_text field": + - do: + search: + rest_total_hits_as_int: true + index: test-unmapped + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 0} + +--- +"Test exists query on match_only_text field in empty index": + - do: + search: + rest_total_hits_as_int: true + index: test-empty + body: + query: + exists: + field: match_only_text + + - match: {hits.total: 0} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml new file mode 100644 index 0000000000000..a41b8d353e3e9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml @@ -0,0 +1,67 @@ +--- +"search with indexed phrases": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: { text: "peter piper picked a peck of pickled peppers" } + + - do: + indices.refresh: + index: [test] + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + text: + query: 
"peter piper" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + q: '"peter piper"~1' + df: text + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + text: "peter piper picked" + + - match: {hits.total: 1} + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + text: "piper" + + - match: {hits.total: 1} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml new file mode 100644 index 0000000000000..fc4e9f9de0f38 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml @@ -0,0 +1,282 @@ +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field1: + type: match_only_text + my_field2: + type: match_only_text + + - do: + index: + index: test + id: 1 + body: + my_field1: "brown fox jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 2 + body: + my_field1: "brown emu jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 3 + body: + my_field1: "jumparound" + my_field2: "emu" + + - do: + index: + index: test + id: 4 + body: + my_field1: "dog" + my_field2: "brown fox jump lazy" + + - do: + indices.refresh: {} + +--- +"minimum should match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + minimum_should_match: 3 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "BROWN dog" + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + +--- +"operator": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + operator: AND + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field2: + query: "xylophoen foo" + fuzziness: 1 + prefix_length: 1 + max_expansions: 10 + fuzzy_transpositions: true + fuzzy_rewrite: constant_score + + - match: { hits.total: 2 } + - match: { hits.hits.0._source.my_field2: "xylophone" } + - match: { hits.hits.1._source.my_field2: "xylophone" } + +--- +"multi_match single field complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + +--- +"multi_match single field partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields complete term": + + - do: + search: + 
rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump laz" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + +--- +"multi_match multiple fields with analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "BROWN FOX JUMP dog" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with minimum_should_match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + minimum_should_match: 4 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "dob nomatch" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + fuzziness: 1 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with slop throws exception": + + - do: + catch: /\[slop\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + slop: 1 + +--- +"multi_match multiple fields with cutoff_frequency throws exception": + + - do: + catch: /\[cutoff_frequency\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + cutoff_frequency: 0.001 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml new file mode 100644 index 0000000000000..f4faf87eb83cc --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml @@ -0,0 +1,141 @@ +--- +setup: + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: match_only_text + analyzer: standard + fields: + raw: + type: keyword + nested1: + type: nested + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test", "_id": "1"}}' + - '{"text" : "Some like it hot, some like it cold", "nested1": [{"foo": "bar1"}]}' + - '{"index": {"_index": "test", "_id": "2"}}' + - '{"text" : "Its cold outside, theres no kind of atmosphere", "nested1": [{"foo": "bar2"}]}' + - '{"index": 
{"_index": "test", "_id": "3"}}' + - '{"text" : "Baby its cold there outside", "nested1": [{"foo": "bar3"}]}' + - '{"index": {"_index": "test", "_id": "4"}}' + - '{"text" : "Outside it is cold and wet", "nested1": [{"foo": "bar4"}]}' + +--- +teardown: + + - do: + cluster.put_settings: + body: + transient: + search.allow_expensive_queries: null + +--- +"Test disallow expensive queries": + + ### Check for initial setting = null -> false + - do: + cluster.get_settings: + flat_settings: true + + - is_false: search.allow_expensive_queries + + ### Update setting to false + - do: + cluster.put_settings: + body: + transient: + search.allow_expensive_queries: "false" + flat_settings: true + + - match: {transient: {search.allow_expensive_queries: "false"}} + + ### Prefix + - do: + catch: /\[prefix\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false. For optimised prefix queries on text fields please enable \[index_prefixes\]./ + search: + index: test + body: + query: + prefix: + text: + value: out + + ### Fuzzy + - do: + catch: /\[fuzzy\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + fuzzy: + text: + value: outwide + + ### Regexp + - do: + catch: /\[regexp\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + regexp: + text: + value: .*ou.*id.* + + ### Wildcard + - do: + catch: /\[wildcard\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + wildcard: + text: + value: out?ide + + ### Range on text + - do: + catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + range: + text: + gte: "theres" + + ### Range on keyword + - do: + catch: /\[range\] queries on \[text\] or \[keyword\] fields cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + range: + text.raw: + gte : "Outside it is cold and wet" + + ### Nested + - do: + catch: /\[joining\] queries cannot be executed when \'search.allow_expensive_queries\' is set to false./ + search: + index: test + body: + query: + nested: + path: "nested1" + query: + bool: + must: [{"match" : {"nested1.foo" : "bar2"}}] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml new file mode 100644 index 0000000000000..cc15796e4697f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml @@ -0,0 +1,92 @@ +--- +"Search shards aliases with and without filters": + - skip: + version: " - 2.99.99" + reason: "match_only_text was added in 3.0" + + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + field: + type: match_only_text + aliases: + test_alias_no_filter: {} + test_alias_filter_1: + filter: + term: + field : value1 + test_alias_filter_2: + filter: + term: + field : value2 + + - do: + search_shards: + index: test_alias_no_filter + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - is_true: indices.test_index + - is_false: indices.test_index.filter + - match: { 
indices.test_index.aliases: [test_alias_no_filter]} + + - do: + search_shards: + index: test_alias_filter_1 + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1] } + - match: { indices.test_index.filter.term.field.value: value1 } + - lte: { indices.test_index.filter.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.term.field.boost: 1.0 } + + - do: + search_shards: + index: ["test_alias_filter_1","test_alias_filter_2"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2]} + - length: { indices.test_index.filter.bool.should: 2 } + - lte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.0.term.field.boost: 1.0 } + - lte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - gte: { indices.test_index.filter.bool.should.1.term.field.boost: 1.0 } + - match: { indices.test_index.filter.bool.adjust_pure_negative: true} + - lte: { indices.test_index.filter.bool.boost: 1.0 } + - gte: { indices.test_index.filter.bool.boost: 1.0 } + + - do: + search_shards: + index: "test*" + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_filter_2, test_alias_no_filter]} + - is_false: indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_filter_1","test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_filter_1, test_alias_no_filter]} + - is_false: indices.test_index.filter + + - do: + search_shards: + index: ["test_alias_no_filter"] + + - length: { shards: 1 } + - match: { shards.0.0.index: test_index } + - match: { indices.test_index.aliases: [test_alias_no_filter]} + - is_false: indices.test_index.filter diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index da62ddfd7017d..66d4654e543a2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -359,18 +359,31 @@ public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionInc ); } + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) throws IOException { + return phraseQuery(stream, slop, enablePositionIncrements); + } + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { throw new IllegalArgumentException( "Can only use phrase queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + return multiPhraseQuery(stream, slop, enablePositionIncrements); + } + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { throw new IllegalArgumentException( "Can only use phrase prefix queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + return 
phrasePrefixQuery(stream, slop, maxExpansions); + } + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { throw new IllegalArgumentException( "Can only use span prefix queries on text fields - not on [" + name + "] which is of type [" + typeName() + "]" diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java new file mode 100644 index 0000000000000..fb97f8c309a70 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -0,0 +1,312 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.opensearch.Version; +import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.index.analysis.IndexAnalyzers; +import org.opensearch.index.analysis.NamedAnalyzer; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.SourceFieldMatchQuery; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; + +/** + * A specialized type of TextFieldMapper which disables the positions and norms to save on storage and executes phrase queries, which requires + * positional data, in a slightly less efficient manner using the {@link org.opensearch.index.query.SourceFieldMatchQuery}. 
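+ * <p>
+ * A rough usage sketch (index and field names here are illustrative only, not defined by this mapper):
+ * <pre>{@code
+ * PUT logs           { "mappings": { "properties": { "message": { "type": "match_only_text" } } } }
+ * GET logs/_search   { "query": { "match_phrase": { "message": "quick brown fox" } } }
+ * }</pre>
+ * Term matching is still served from the inverted index; only the positional verification needed by phrase-style
+ * queries is re-run against the {@code _source} value of each candidate document.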
+ */ +public class MatchOnlyTextFieldMapper extends TextFieldMapper { + + public static final FieldType FIELD_TYPE = new FieldType(); + public static final String CONTENT_TYPE = "match_only_text"; + private final String indexOptions = FieldMapper.indexOptionToString(FIELD_TYPE.indexOptions()); + private final boolean norms = FIELD_TYPE.omitNorms() == false; + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + static { + FIELD_TYPE.setTokenized(true); + FIELD_TYPE.setStored(false); + FIELD_TYPE.setStoreTermVectors(false); + FIELD_TYPE.setOmitNorms(true); + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE.freeze(); + } + + public static final TypeParser PARSER = new TypeParser((n, c) -> new Builder(n, c.indexVersionCreated(), c.getIndexAnalyzers())); + + protected MatchOnlyTextFieldMapper( + String simpleName, + FieldType fieldType, + MatchOnlyTextFieldType mappedFieldType, + TextFieldMapper.PrefixFieldMapper prefixFieldMapper, + TextFieldMapper.PhraseFieldMapper phraseFieldMapper, + MultiFields multiFields, + CopyTo copyTo, + Builder builder + ) { + + super(simpleName, fieldType, mappedFieldType, prefixFieldMapper, phraseFieldMapper, multiFields, copyTo, builder); + } + + @Override + public ParametrizedFieldMapper.Builder getMergeBuilder() { + return new Builder(simpleName(), this.indexCreatedVersion, this.indexAnalyzers).init(this); + } + + /** + * Builder class for constructing the MatchOnlyTextFieldMapper. + */ + public static class Builder extends TextFieldMapper.Builder { + final Parameter<String> indexOptions = indexOptions(m -> ((MatchOnlyTextFieldMapper) m).indexOptions); + + private static Parameter<String> indexOptions(Function<FieldMapper, String> initializer) { + return Parameter.restrictedStringParam("index_options", false, initializer, "docs"); + } + + final Parameter<Boolean> norms = norms(m -> ((MatchOnlyTextFieldMapper) m).norms); + final Parameter<Boolean> indexPhrases = Parameter.boolParam( + "index_phrases", + false, + m -> ((MatchOnlyTextFieldType) m.mappedFieldType).indexPhrases, + false + ).setValidator(v -> { + if (v == true) { + throw new MapperParsingException("Index phrases cannot be enabled on for match_only_text field. Use text field instead"); + } + }); + + final Parameter<PrefixConfig> indexPrefixes = new Parameter<>( + "index_prefixes", + false, + () -> null, + TextFieldMapper::parsePrefixConfig, + m -> Optional.ofNullable(((MatchOnlyTextFieldType) m.mappedFieldType).prefixFieldType) + .map(p -> new PrefixConfig(p.minChars, p.maxChars)) + .orElse(null) + ).acceptsNull().setValidator(v -> { + if (v != null) { + throw new MapperParsingException("Index prefixes cannot be enabled on for match_only_text field.
Use text field instead"); + } + }); + + private static Parameter<Boolean> norms(Function<FieldMapper, Boolean> initializer) { + return Parameter.boolParam("norms", false, initializer, false) + .setMergeValidator((o, n) -> o == n || (o && n == false)) + .setValidator(v -> { + if (v == true) { + throw new MapperParsingException("Norms cannot be enabled on for match_only_text field"); + } + }); + } + + public Builder(String name, IndexAnalyzers indexAnalyzers) { + super(name, indexAnalyzers); + } + + public Builder(String name, Version indexCreatedVersion, IndexAnalyzers indexAnalyzers) { + super(name, indexCreatedVersion, indexAnalyzers); + } + + @Override + public MatchOnlyTextFieldMapper build(BuilderContext context) { + FieldType fieldType = TextParams.buildFieldType(index, store, indexOptions, norms, termVectors); + MatchOnlyTextFieldType tft = buildFieldType(fieldType, context); + return new MatchOnlyTextFieldMapper( + name, + fieldType, + tft, + buildPrefixMapper(context, fieldType, tft), + buildPhraseMapper(fieldType, tft), + multiFieldsBuilder.build(this, context), + copyTo.build(), + this + ); + } + + @Override + protected MatchOnlyTextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { + NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); + NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); + NamedAnalyzer searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer(); + + if (fieldType.indexOptions().compareTo(IndexOptions.DOCS) > 0) { + throw new IllegalArgumentException("Cannot set position_increment_gap on field [" + name + "] without positions enabled"); + } + if (positionIncrementGap.get() != POSITION_INCREMENT_GAP_USE_ANALYZER) { + if (fieldType.indexOptions().compareTo(IndexOptions.DOCS) < 0) { + throw new IllegalArgumentException( + "Cannot set position_increment_gap on field [" + name + "] without indexing enabled" + ); + } + indexAnalyzer = new NamedAnalyzer(indexAnalyzer, positionIncrementGap.get()); + searchAnalyzer = new NamedAnalyzer(searchAnalyzer, positionIncrementGap.get()); + searchQuoteAnalyzer = new NamedAnalyzer(searchQuoteAnalyzer, positionIncrementGap.get()); + } + TextSearchInfo tsi = new TextSearchInfo(fieldType, similarity.getValue(), searchAnalyzer, searchQuoteAnalyzer); + MatchOnlyTextFieldType ft = new MatchOnlyTextFieldType( + buildFullName(context), + index.getValue(), + fieldType.stored(), + tsi, + meta.getValue() + ); + ft.setIndexAnalyzer(indexAnalyzer); + ft.setEagerGlobalOrdinals(eagerGlobalOrdinals.getValue()); + ft.setBoost(boost.getValue()); + if (fieldData.getValue()) { + ft.setFielddata(true, freqFilter.getValue()); + } + return ft; + } + + @Override + protected List<Parameter<?>> getParameters() { + return Arrays.asList( + index, + store, + indexOptions, + norms, + termVectors, + analyzers.indexAnalyzer, + analyzers.searchAnalyzer, + analyzers.searchQuoteAnalyzer, + similarity, + positionIncrementGap, + fieldData, + freqFilter, + eagerGlobalOrdinals, + indexPhrases, + indexPrefixes, + boost, + meta + ); + } + } + + /** + * The specific field type for MatchOnlyTextFieldMapper + * + * @opensearch.internal + */ + public static final class MatchOnlyTextFieldType extends TextFieldType { + private final boolean indexPhrases = false; + + private PrefixFieldType prefixFieldType; + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + public MatchOnlyTextFieldType(String name, boolean indexed, boolean stored, TextSearchInfo tsi, Map<String, String> meta) { + super(name, indexed, stored, tsi, meta); + } + + @Override + public Query phraseQuery(TokenStream
stream, int slop, boolean enablePosIncrements, QueryShardContext context) throws IOException { + PhraseQuery phraseQuery = (PhraseQuery) super.phraseQuery(stream, slop, enablePosIncrements); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (Term term : phraseQuery.getTerms()) { + builder.add(new TermQuery(term), BooleanClause.Occur.FILTER); + } + return new SourceFieldMatchQuery(builder.build(), phraseQuery, this, context); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements, QueryShardContext context) + throws IOException { + MultiPhraseQuery multiPhraseQuery = (MultiPhraseQuery) super.multiPhraseQuery(stream, slop, enablePositionIncrements); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (Term[] terms : multiPhraseQuery.getTermArrays()) { + if (terms.length > 1) { + // Multiple terms in the same position, creating a disjunction query for it and + // adding it to conjunction query + BooleanQuery.Builder disjunctions = new BooleanQuery.Builder(); + for (Term term : terms) { + disjunctions.add(new TermQuery(term), BooleanClause.Occur.SHOULD); + } + builder.add(disjunctions.build(), BooleanClause.Occur.FILTER); + } else { + builder.add(new TermQuery(terms[0]), BooleanClause.Occur.FILTER); + } + } + return new SourceFieldMatchQuery(builder.build(), multiPhraseQuery, this, context); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, QueryShardContext context) throws IOException { + Query phrasePrefixQuery = super.phrasePrefixQuery(stream, slop, maxExpansions); + List<List<Term>> termArray = getTermsFromTokenStream(stream); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (int i = 0; i < termArray.size(); i++) { + if (i == termArray.size() - 1) { + // last element of the term Array is a prefix, thus creating a prefix query for it and adding it to + // conjunction query + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery(name()); + mqb.add(termArray.get(i).toArray(new Term[0])); + builder.add(mqb, BooleanClause.Occur.FILTER); + } else { + if (termArray.get(i).size() > 1) { + // multiple terms in the same position, creating a disjunction query for it and + // adding it to conjunction query + BooleanQuery.Builder disjunctions = new BooleanQuery.Builder(); + for (Term term : termArray.get(i)) { + disjunctions.add(new TermQuery(term), BooleanClause.Occur.SHOULD); + } + builder.add(disjunctions.build(), BooleanClause.Occur.FILTER); + } else { + builder.add(new TermQuery(termArray.get(i).get(0)), BooleanClause.Occur.FILTER); + } + } + } + return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); + } + + private List<List<Term>> getTermsFromTokenStream(TokenStream stream) throws IOException { + final List<List<Term>> termArray = new ArrayList<>(); + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + List<Term> currentTerms = new ArrayList<>(); + stream.reset(); + while (stream.incrementToken()) { + if (posIncrAtt.getPositionIncrement() != 0) { + if (currentTerms.isEmpty() == false) { + termArray.add(List.copyOf(currentTerms)); + } + currentTerms.clear(); + } + currentTerms.add(new Term(name(), termAtt.getBytesRef())); + } + termArray.add(List.copyOf(currentTerms)); + return termArray; + } + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java
b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 1d0d1ae2bd899..d0e041e68a81d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -110,7 +110,7 @@ public class TextFieldMapper extends ParametrizedFieldMapper { public static final String CONTENT_TYPE = "text"; - private static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; + protected static final int POSITION_INCREMENT_GAP_USE_ANALYZER = -1; private static final String FAST_PHRASE_SUFFIX = "._index_phrase"; /** @@ -152,11 +152,11 @@ private static TextFieldMapper toType(FieldMapper in) { * * @opensearch.internal */ - private static final class PrefixConfig implements ToXContent { + protected static final class PrefixConfig implements ToXContent { final int minChars; final int maxChars; - private PrefixConfig(int minChars, int maxChars) { + PrefixConfig(int minChars, int maxChars) { this.minChars = minChars; this.maxChars = maxChars; if (minChars > maxChars) { @@ -198,7 +198,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - private static PrefixConfig parsePrefixConfig(String propName, ParserContext parserContext, Object propNode) { + static PrefixConfig parsePrefixConfig(String propName, ParserContext parserContext, Object propNode) { if (propNode == null) { return null; } @@ -214,7 +214,7 @@ private static PrefixConfig parsePrefixConfig(String propName, ParserContext par * * @opensearch.internal */ - private static final class FielddataFrequencyFilter implements ToXContent { + protected static final class FielddataFrequencyFilter implements ToXContent { final double minFreq; final double maxFreq; final int minSegmentSize; @@ -280,15 +280,14 @@ public static class Builder extends ParametrizedFieldMapper.Builder { private final Version indexCreatedVersion; - private final Parameter<Boolean> index = Parameter.indexParam(m -> toType(m).mappedFieldType.isSearchable(), true); - private final Parameter<Boolean> store = Parameter.storeParam(m -> toType(m).fieldType.stored(), false); + protected final Parameter<Boolean> index = Parameter.indexParam(m -> toType(m).mappedFieldType.isSearchable(), true); + protected final Parameter<Boolean> store = Parameter.storeParam(m -> toType(m).fieldType.stored(), false); final Parameter<SimilarityProvider> similarity = TextParams.similarity(m -> toType(m).similarity); final Parameter<String> indexOptions = TextParams.indexOptions(m -> toType(m).indexOptions); final Parameter<Boolean> norms = TextParams.norms(true, m -> toType(m).fieldType.omitNorms() == false); final Parameter<String> termVectors = TextParams.termVectors(m -> toType(m).termVectors); - final Parameter<Integer> positionIncrementGap = Parameter.intParam( "position_increment_gap", false, @@ -332,8 +331,8 @@ public static class Builder extends ParametrizedFieldMapper.Builder { .orElse(null) ).acceptsNull(); - private final Parameter<Float> boost = Parameter.boostParam(); - private final Parameter<Map<String, String>> meta = Parameter.metaParam(); + protected final Parameter<Float> boost = Parameter.boostParam(); + protected final Parameter<Map<String, String>> meta = Parameter.metaParam(); final TextParams.Analyzers analyzers; @@ -395,7 +394,7 @@ protected List<Parameter<?>> getParameters() { ); } - private TextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { + protected TextFieldType buildFieldType(FieldType fieldType, BuilderContext context) { NamedAnalyzer indexAnalyzer = analyzers.getIndexAnalyzer(); NamedAnalyzer searchAnalyzer = analyzers.getSearchAnalyzer(); NamedAnalyzer
searchQuoteAnalyzer = analyzers.getSearchQuoteAnalyzer(); @@ -420,7 +419,7 @@ private TextFieldType buildFieldType(FieldType fieldType, BuilderContext context return ft; } - private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fieldType, TextFieldType tft) { + protected PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fieldType, TextFieldType tft) { if (indexPrefixes.get() == null) { return null; } @@ -454,7 +453,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi return new PrefixFieldMapper(pft, prefixFieldType); } - private PhraseFieldMapper buildPhraseMapper(FieldType fieldType, TextFieldType parent) { + protected PhraseFieldMapper buildPhraseMapper(FieldType fieldType, TextFieldType parent) { if (indexPhrases.get() == false) { return null; } @@ -683,7 +682,7 @@ public Query existsQuery(QueryShardContext context) { * * @opensearch.internal */ - private static final class PhraseFieldMapper extends FieldMapper { + protected static final class PhraseFieldMapper extends FieldMapper { PhraseFieldMapper(FieldType fieldType, PhraseFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); @@ -710,7 +709,7 @@ protected String contentType() { * * @opensearch.internal */ - private static final class PrefixFieldMapper extends FieldMapper { + protected static final class PrefixFieldMapper extends FieldMapper { protected PrefixFieldMapper(FieldType fieldType, PrefixFieldType mappedFieldType) { super(mappedFieldType.name(), fieldType, mappedFieldType, MultiFields.empty(), CopyTo.empty()); @@ -968,15 +967,15 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName, S } - private final FieldType fieldType; + protected final FieldType fieldType; private final PrefixFieldMapper prefixFieldMapper; private final PhraseFieldMapper phraseFieldMapper; private final SimilarityProvider similarity; private final String indexOptions; private final String termVectors; private final int positionIncrementGap; - private final Version indexCreatedVersion; - private final IndexAnalyzers indexAnalyzers; + protected final Version indexCreatedVersion; + protected final IndexAnalyzers indexAnalyzers; private final FielddataFrequencyFilter freqFilter; protected TextFieldMapper( diff --git a/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java new file mode 100644 index 0000000000000..b0be20e417efe --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/SourceFieldMatchQuery.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.SourceValueFetcher; +import org.opensearch.search.lookup.LeafSearchLookup; +import org.opensearch.search.lookup.SearchLookup; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * A query that matches against each document from the parent query by filtering using the source field values. + * Useful to query against field type which doesn't store positional data and field is not stored/computed dynamically. + */ +public class SourceFieldMatchQuery extends Query { + private final Query delegateQuery; + private final Query filter; + private final SearchLookup lookup; + private final MappedFieldType fieldType; + private final SourceValueFetcher valueFetcher; + private final QueryShardContext context; + + /** + * Constructs a SourceFieldMatchQuery. + * + * @param delegateQuery The parent query to use to find matches. + * @param filter The query used to filter further by running against field value fetched using _source field. + * @param fieldType The mapped field type. + * @param context The QueryShardContext to get lookup and valueFetcher + */ + public SourceFieldMatchQuery(Query delegateQuery, Query filter, MappedFieldType fieldType, QueryShardContext context) { + this.delegateQuery = delegateQuery; + this.filter = filter; + this.fieldType = fieldType; + this.context = context; + this.lookup = context.lookup(); + if (!context.documentMapper("").sourceMapper().enabled()) { + throw new IllegalArgumentException( + "SourceFieldMatchQuery error: unable to fetch fields from _source field: _source is disabled in the mappings " + + "for index [" + + context.index().getName() + + "]" + ); + } + this.valueFetcher = (SourceValueFetcher) fieldType.valueFetcher(context, lookup, null); + } + + @Override + public void visit(QueryVisitor visitor) { + delegateQuery.visit(visitor); + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + Query rewritten = indexSearcher.rewrite(delegateQuery); + if (rewritten == delegateQuery) { + return this; + } + return new SourceFieldMatchQuery(rewritten, filter, fieldType, context); + } + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + + Weight weight = delegateQuery.createWeight(searcher, ScoreMode.TOP_DOCS, boost); + + return new ConstantScoreWeight(this, boost) { + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + + Scorer scorer = weight.scorer(context); + if (scorer == null) { + // none of the docs are matching + return null; + } + DocIdSetIterator approximation = scorer.iterator(); + LeafSearchLookup leafSearchLookup = lookup.getLeafSearchLookup(context); + TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) { + + @Override + public boolean matches() { + 
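// Two-phase verification: rebuild this candidate's field value from its _source in a transient MemoryIndex and accept the hit only if the positional (filter) query still matches that value. +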
leafSearchLookup.setDocument(approximation.docID()); + List values = valueFetcher.fetchValues(leafSearchLookup.source()); + // Missing fields won't count as match. Can we use a default value for missing field? + if (values.isEmpty()) { + return false; + } + MemoryIndex memoryIndex = new MemoryIndex(); + for (Object value : values) { + memoryIndex.addField(fieldType.name(), (String) value, fieldType.indexAnalyzer()); + } + float score = memoryIndex.search(filter); + return score > 0.0f; + } + + @Override + public float matchCost() { + // arbitrary cost + return 1000f; + } + }; + return new ConstantScoreScorer(this, score(), ScoreMode.TOP_DOCS, twoPhase); + } + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + // It is fine to cache if delegate query weight is cacheable since additional logic here + // is just a filter on top of delegate query matches + return weight.isCacheable(ctx); + } + }; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (sameClassAs(o) == false) { + return false; + } + SourceFieldMatchQuery other = (SourceFieldMatchQuery) o; + return Objects.equals(this.delegateQuery, other.delegateQuery) + && Objects.equals(this.filter, other.filter) + && Objects.equals(this.fieldType, other.fieldType) + && Objects.equals(this.context, other.context); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), delegateQuery, filter, fieldType, context); + } + + @Override + public String toString(String f) { + return "SourceFieldMatchQuery (delegate query: [ " + delegateQuery.toString(f) + " ], filter query: [ " + filter.toString(f) + "])"; + } +} diff --git a/server/src/main/java/org/opensearch/index/search/MatchQuery.java b/server/src/main/java/org/opensearch/index/search/MatchQuery.java index 9e2b79971369d..ec6755ea25703 100644 --- a/server/src/main/java/org/opensearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MatchQuery.java @@ -67,6 +67,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.support.QueryParsers; @@ -701,7 +702,7 @@ private Query analyzeMultiBoolean(String field, TokenStream stream, BooleanClaus protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { try { checkForPositions(field); - return fieldType.phraseQuery(stream, slop, enablePositionIncrements); + return fieldType.phraseQuery(stream, slop, enablePositionIncrements, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); @@ -714,7 +715,7 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws protected Query analyzeMultiPhrase(String field, TokenStream stream, int slop) throws IOException { try { checkForPositions(field); - return fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements); + return fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); @@ -728,7 +729,7 @@ private Query analyzePhrasePrefix(String field, TokenStream stream, int slop, in if (positionCount > 1) { 
checkForPositions(field); } - return fieldType.phrasePrefixQuery(stream, slop, maxExpansions); + return fieldType.phrasePrefixQuery(stream, slop, maxExpansions, context); } catch (IllegalArgumentException | IllegalStateException e) { if (lenient) { return newLenientFieldQuery(field, e); @@ -887,6 +888,9 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in private void checkForPositions(String field) { if (fieldType.getTextSearchInfo().hasPositions() == false) { + if (fieldType instanceof MatchOnlyTextFieldMapper.MatchOnlyTextFieldType) { + return; + } throw new IllegalStateException("field:[" + field + "] was indexed without position data; cannot run PhraseQuery"); } } diff --git a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java index 241f05af2c512..8c0c87e8c9d0c 100644 --- a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java @@ -248,7 +248,7 @@ protected Query newPrefixQuery(Term term) { protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { List disjunctions = new ArrayList<>(); for (FieldAndBoost fieldType : blendedFields) { - Query query = fieldType.fieldType.phraseQuery(stream, slop, enablePositionIncrements); + Query query = fieldType.fieldType.phraseQuery(stream, slop, enablePositionIncrements, context); if (fieldType.boost != 1f) { query = new BoostQuery(query, fieldType.boost); } @@ -261,7 +261,7 @@ protected Query analyzePhrase(String field, TokenStream stream, int slop) throws protected Query analyzeMultiPhrase(String field, TokenStream stream, int slop) throws IOException { List disjunctions = new ArrayList<>(); for (FieldAndBoost fieldType : blendedFields) { - Query query = fieldType.fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements); + Query query = fieldType.fieldType.multiPhraseQuery(stream, slop, enablePositionIncrements, context); if (fieldType.boost != 1f) { query = new BoostQuery(query, fieldType.boost); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index 5c2137ec742a4..eea5dbbf57f6c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -59,6 +59,7 @@ import org.opensearch.index.mapper.IpFieldMapper; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.MetadataFieldMapper; import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; @@ -158,6 +159,7 @@ public static Map getMappers(List mappe mappers.put(nanoseconds.type(), DateFieldMapper.NANOS_PARSER); mappers.put(IpFieldMapper.CONTENT_TYPE, IpFieldMapper.PARSER); mappers.put(TextFieldMapper.CONTENT_TYPE, TextFieldMapper.PARSER); + mappers.put(MatchOnlyTextFieldMapper.CONTENT_TYPE, MatchOnlyTextFieldMapper.PARSER); mappers.put(KeywordFieldMapper.CONTENT_TYPE, KeywordFieldMapper.PARSER); mappers.put(ObjectMapper.CONTENT_TYPE, new ObjectMapper.TypeParser()); mappers.put(ObjectMapper.NESTED_CONTENT_TYPE, new ObjectMapper.TypeParser()); diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java 
b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java new file mode 100644 index 0000000000000..13cb279418fa8 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldAnalyzerModeTests.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +public class MatchOnlyTextFieldAnalyzerModeTests extends TextFieldAnalyzerModeTests { + @Override + ParametrizedFieldMapper.TypeParser getTypeParser() { + return MatchOnlyTextFieldMapper.PARSER; + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java new file mode 100644 index 0000000000000..580f8cccc9af5 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -0,0 +1,450 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.IndexableFieldType; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.PhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.MatchPhrasePrefixQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.search.MatchQuery; +import org.junit.Before; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.Is.is; + +public class MatchOnlyTextFieldMapperTests extends TextFieldMapperTests { + + @Before + public void setupMatchOnlyTextFieldMapper() { + textFieldName = "match_only_text"; + } + + @Override + public void testDefaults() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + assertEquals(fieldMapping(this::minimalMapping).toString(), mapper.mappingSource().toString()); + + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertEquals("1234", fields[0].stringValue()); + IndexableFieldType fieldType = fields[0].fieldType(); + assertThat(fieldType.omitNorms(), equalTo(true)); + assertTrue(fieldType.tokenized()); + assertFalse(fieldType.stored()); + 
assertThat(fieldType.indexOptions(), equalTo(IndexOptions.DOCS)); + assertThat(fieldType.storeTermVectors(), equalTo(false)); + assertThat(fieldType.storeTermVectorOffsets(), equalTo(false)); + assertThat(fieldType.storeTermVectorPositions(), equalTo(false)); + assertThat(fieldType.storeTermVectorPayloads(), equalTo(false)); + assertEquals(DocValuesType.NONE, fieldType.docValuesType()); + } + + @Override + public void testEnableStore() throws IOException { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("store", true))); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + IndexableField[] fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.length); + assertTrue(fields[0].fieldType().stored()); + } + + @Override + public void testIndexOptions() throws IOException { + Map supportedOptions = new HashMap<>(); + supportedOptions.put("docs", IndexOptions.DOCS); + + Map unsupportedOptions = new HashMap<>(); + unsupportedOptions.put("freqs", IndexOptions.DOCS_AND_FREQS); + unsupportedOptions.put("positions", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + unsupportedOptions.put("offsets", IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + + for (String option : supportedOptions.keySet()) { + XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); + mapping.startObject(option).field("type", textFieldName).field("index_options", option).endObject(); + mapping.endObject().endObject().endObject(); + + DocumentMapper mapper = createDocumentMapper(mapping); + String serialized = Strings.toString(MediaTypeRegistry.JSON, mapper); + assertThat(serialized, containsString("\"docs\":{\"type\":\"match_only_text\"}")); + + ParsedDocument doc = mapper.parse(source(b -> { b.field(option, "1234"); })); + + IndexOptions options = supportedOptions.get(option); + IndexableField[] fields = doc.rootDoc().getFields(option); + assertEquals(1, fields.length); + assertEquals(options, fields[0].fieldType().indexOptions()); + } + + for (String option : unsupportedOptions.keySet()) { + XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); + mapping.startObject(option).field("type", textFieldName).field("index_options", option).endObject(); + mapping.endObject().endObject().endObject(); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> createDocumentMapper(mapping)); + assertThat( + e.getMessage(), + containsString( + "Failed to parse mapping [_doc]: Unknown value [" + option + "] for field [index_options] - accepted values are [docs]" + ) + ); + } + } + + @Override + public void testAnalyzedFieldPositionIncrementWithoutPositions() { + for (String indexOptions : List.of("docs")) { + try { + createDocumentMapper( + fieldMapping( + b -> b.field("type", textFieldName).field("index_options", indexOptions).field("position_increment_gap", 10) + ) + ); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + } + + @Override + public void testBWCSerialization() throws IOException {} + + @Override + public void testPositionIncrementGap() throws IOException {} + + @Override + public void testDefaultPositionIncrementGap() throws IOException {} + + @Override + public void testMinimalToMaximal() throws IOException {} + + @Override + public void testIndexPrefixMapping() throws IOException { + MapperParsingException e = expectThrows( + 
MapperParsingException.class, + () -> createDocumentMapper( + fieldMapping( + b -> b.field("type", textFieldName) + .field("analyzer", "standard") + .startObject("index_prefixes") + .field("min_chars", 2) + .field("max_chars", 10) + .endObject() + ) + ) + ); + assertEquals( + "Failed to parse mapping [_doc]: Index prefixes cannot be enabled on for match_only_text field. Use text field instead", + e.getMessage() + ); + } + + @Override + public void testIndexPrefixIndexTypes() throws IOException { + // not supported and asserted the expected behavior in testIndexPrefixMapping + } + + @Override + public void testFastPhrasePrefixes() throws IOException { + // not supported and asserted the expected behavior in testIndexPrefixMapping + } + + public void testPhrasePrefixes() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + b.startObject("synfield"); + { + b.field("type", textFieldName); + b.field("analyzer", "standard"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "words")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "two")); + mqbFilter.add(new Term("field", "words")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "three words here").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "here")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "three")); + mqbFilter.add(new Term("field", "words")); + mqbFilter.add(new Term("field", "here")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "three")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "words")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.setSlop(1); + mqbFilter.add(new Term("field", "two")); + mqbFilter.add(new Term("field", "words")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + Query q = 
new MatchPhrasePrefixQueryBuilder("field", "singleton").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "singleton")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(mqb, BooleanClause.Occur.FILTER).build(), + mqb, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, is(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("field"); + mqb.add(new Term("field", "stopword")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("field"); + mqbFilter.add(new Term("field", "sparkle")); + mqbFilter.add(new Term[] { new Term("field", "stopword") }, 2); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "sparkle")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "motor dogs"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "motor")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "motor")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setPhraseSlop(1); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "two dogs"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "two")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + mqbFilter.setSlop(1); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "two")), BooleanClause.Occur.FILTER) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + + { + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q = matchQuery.parse(MatchQuery.Type.PHRASE_PREFIX, "synfield", "three dogs word"); + MultiPhrasePrefixQuery mqb = new MultiPhrasePrefixQuery("synfield"); + mqb.add(new Term("synfield", "word")); + MultiPhrasePrefixQuery mqbFilter = new MultiPhrasePrefixQuery("synfield"); + mqbFilter.add(new Term("synfield", "three")); + mqbFilter.add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }); + 
mqbFilter.add(new Term("synfield", "word")); + Query expected = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "three")), BooleanClause.Occur.FILTER) + .add( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "dogs")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("synfield", "dog")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.FILTER + ) + .add(mqb, BooleanClause.Occur.FILTER) + .build(), + mqbFilter, + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q, equalTo(expected)); + } + } + + @Override + public void testFastPhraseMapping() throws IOException { + MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(mapping(b -> { + b.startObject("field") + .field("type", textFieldName) + .field("analyzer", "my_stop_analyzer") + .field("index_phrases", true) + .endObject(); + // "standard" will be replaced with MockSynonymAnalyzer + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").field("index_phrases", true).endObject(); + }))); + assertEquals( + "Failed to parse mapping [_doc]: Index phrases cannot be enabled on for match_only_text field. Use text field instead", + e.getMessage() + ); + } + + @Override + public void testSimpleMerge() throws IOException {} + + public void testPhraseQuery() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field").field("type", textFieldName).field("analyzer", "my_stop_analyzer").endObject(); + // "standard" will be replaced with MockSynonymAnalyzer + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new MatchPhraseQueryBuilder("field", "two words").toQuery(queryShardContext); + Query expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery("field", "two", "words"), + mapperService.fieldType("field"), + queryShardContext + ); + + assertThat(q, is(expectedQuery)); + Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); + assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + + Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "three")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "here")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery("field", "three", "words", "here"), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q2, is(expectedQuery)); + + Query q3 = new MatchPhraseQueryBuilder("field", "two words").slop(2).toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "two")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "words")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery(2, "field", "two", "words"), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q3, is(expectedQuery)); + + Query q5 = new 
MatchPhraseQueryBuilder("field", "sparkle a stopword").toQuery(queryShardContext); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("field", "sparkle")), BooleanClause.Occur.FILTER) + .add(new TermQuery(new Term("field", "stopword")), BooleanClause.Occur.FILTER) + .build(), + new PhraseQuery.Builder().add(new Term("field", "sparkle")).add(new Term("field", "stopword"), 2).build(), + mapperService.fieldType("field"), + queryShardContext + ); + assertThat(q5, is(expectedQuery)); + + MatchQuery matchQuery = new MatchQuery(queryShardContext); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + Query q6 = matchQuery.parse(MatchQuery.Type.PHRASE, "synfield", "motor dogs"); + expectedQuery = new SourceFieldMatchQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "motor")), BooleanClause.Occur.FILTER) + .add( + new BooleanQuery.Builder().add(new TermQuery(new Term("synfield", "dogs")), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term("synfield", "dog")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.FILTER + ) + .build(), + new MultiPhraseQuery.Builder().add(new Term("synfield", "motor")) + .add(new Term[] { new Term("synfield", "dogs"), new Term("synfield", "dog") }, 1) + .build(), + mapperService.fieldType("synfield"), + queryShardContext + ); + assertThat(q6, is(expectedQuery)); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java new file mode 100644 index 0000000000000..51234fa04ddc2 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.opensearch.common.lucene.Lucene; + +public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { + + @Override + TextFieldMapper.TextFieldType createFieldType(boolean searchable) { + TextSearchInfo tsi = new TextSearchInfo( + TextFieldMapper.Defaults.FIELD_TYPE, + null, + Lucene.STANDARD_ANALYZER, + Lucene.STANDARD_ANALYZER + ); + return new MatchOnlyTextFieldMapper.MatchOnlyTextFieldType( + "field", + searchable, + false, + tsi, + ParametrizedFieldMapper.Parameter.metaParam().get() + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java index 93bed729f0974..83a3bdc580ae6 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldAnalyzerModeTests.java @@ -59,6 +59,9 @@ import static org.mockito.Mockito.when; public class TextFieldAnalyzerModeTests extends OpenSearchTestCase { + ParametrizedFieldMapper.TypeParser getTypeParser() { + return TextFieldMapper.PARSER; + } private static Map defaultAnalyzers() { Map analyzers = new HashMap<>(); @@ -101,7 +104,7 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); + getTypeParser().parse("field", fieldNode, parserContext); // check that "analyzer" set to something that only supports AnalysisMode.SEARCH_TIME or AnalysisMode.INDEX_TIME is blocked AnalysisMode mode = randomFrom(AnalysisMode.SEARCH_TIME, AnalysisMode.INDEX_TIME); @@ -110,7 +113,7 @@ public void testParseTextFieldCheckAnalyzerAnalysisMode() { indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); fieldNode.put("analyzer", "my_analyzer"); - MapperException ex = expectThrows(MapperException.class, () -> { TextFieldMapper.PARSER.parse("name", fieldNode, parserContext); }); + MapperException ex = expectThrows(MapperException.class, () -> { getTypeParser().parse("name", fieldNode, parserContext); }); assertThat( ex.getMessage(), containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run") @@ -136,7 +139,7 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("textField", fieldNode, parserContext); + getTypeParser().parse("textField", fieldNode, parserContext); // check that "analyzer" set to AnalysisMode.INDEX_TIME is blocked mode = AnalysisMode.INDEX_TIME; @@ -151,10 +154,7 @@ public void testParseTextFieldCheckSearchAnalyzerAnalysisMode() { if (settingToTest.equals("search_quote_analyzer")) { fieldNode.put("search_analyzer", "standard"); } - MapperException ex = expectThrows( - MapperException.class, - () -> { TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); } - ); + MapperException ex = expectThrows(MapperException.class, () -> { getTypeParser().parse("field", fieldNode, parserContext); }); assertEquals( "analyzer [my_named_analyzer] contains filters 
[my_analyzer] that are not allowed to run in search time mode.", ex.getMessage() @@ -174,10 +174,7 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { analyzers.put("my_analyzer", new NamedAnalyzer("my_named_analyzer", AnalyzerScope.INDEX, createAnalyzerWithMode(mode))); IndexAnalyzers indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - MapperException ex = expectThrows( - MapperException.class, - () -> { TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); } - ); + MapperException ex = expectThrows(MapperException.class, () -> { getTypeParser().parse("field", fieldNode, parserContext); }); assertThat( ex.getMessage(), containsString("analyzer [my_named_analyzer] contains filters [my_analyzer] that are not allowed to run") @@ -193,7 +190,6 @@ public void testParseTextFieldCheckAnalyzerWithSearchAnalyzerAnalysisMode() { indexAnalyzers = new IndexAnalyzers(analyzers, Collections.emptyMap(), Collections.emptyMap()); when(parserContext.getIndexAnalyzers()).thenReturn(indexAnalyzers); - TextFieldMapper.PARSER.parse("field", fieldNode, parserContext); + getTypeParser().parse("field", fieldNode, parserContext); } - } diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java index a9b902e121bda..a22bfa5e845b1 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java @@ -81,6 +81,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.search.MatchQuery; +import org.junit.Before; import java.io.IOException; import java.util.Arrays; @@ -95,6 +96,13 @@ public class TextFieldMapperTests extends MapperTestCase { + public String textFieldName = "text"; + + @Before + public void setup() { + textFieldName = "text"; + } + @Override protected void writeFieldValue(XContentBuilder builder) throws IOException { builder.value(1234); @@ -169,30 +177,34 @@ protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("index", b -> b.field("index", false)); checker.registerConflictCheck("store", b -> b.field("store", true)); - checker.registerConflictCheck("index_phrases", b -> b.field("index_phrases", true)); - checker.registerConflictCheck("index_prefixes", b -> b.startObject("index_prefixes").endObject()); - checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs")); + if (!textFieldName.equals("match_only_text")) { + checker.registerConflictCheck("index_phrases", b -> b.field("index_phrases", true)); + checker.registerConflictCheck("index_prefixes", b -> b.startObject("index_prefixes").endObject()); + checker.registerConflictCheck("index_options", b -> b.field("index_options", "docs")); + } checker.registerConflictCheck("similarity", b -> b.field("similarity", "boolean")); checker.registerConflictCheck("analyzer", b -> b.field("analyzer", "keyword")); checker.registerConflictCheck("term_vector", b -> b.field("term_vector", "yes")); checker.registerConflictCheck("position_increment_gap", b -> b.field("position_increment_gap", 10)); - // norms can be set from true to false, but not vice versa - checker.registerConflictCheck("norms", fieldMapping(b -> { - b.field("type", "text"); - 
b.field("norms", false); - }), fieldMapping(b -> { - b.field("type", "text"); - b.field("norms", true); - })); - checker.registerUpdateCheck(b -> { - b.field("type", "text"); - b.field("norms", true); - }, b -> { - b.field("type", "text"); - b.field("norms", false); - }, m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())); + if (!textFieldName.equals(MatchOnlyTextFieldMapper.CONTENT_TYPE)) { + // norms can be set from true to false, but not vice versa + checker.registerConflictCheck("norms", fieldMapping(b -> { + b.field("type", textFieldName); + b.field("norms", false); + }), fieldMapping(b -> { + b.field("type", textFieldName); + b.field("norms", true); + })); + checker.registerUpdateCheck(b -> { + b.field("type", textFieldName); + b.field("norms", true); + }, b -> { + b.field("type", textFieldName); + b.field("norms", false); + }, m -> assertFalse(m.fieldType().getTextSearchInfo().hasNorms())); + } checker.registerUpdateCheck(b -> b.field("boost", 2.0), m -> assertEquals(m.fieldType().boost(), 2.0, 0)); @@ -237,7 +249,7 @@ public TokenStream create(TokenStream tokenStream) { @Override protected void minimalMapping(XContentBuilder b) throws IOException { - b.field("type", "text"); + b.field("type", textFieldName); } public void testDefaults() throws IOException { @@ -262,7 +274,7 @@ public void testDefaults() throws IOException { public void testBWCSerialization() throws IOException { MapperService mapperService = createMapperService(fieldMapping(b -> { - b.field("type", "text"); + b.field("type", textFieldName); b.field("fielddata", true); b.startObject("fields"); { @@ -312,7 +324,7 @@ public void testBWCSerialization() throws IOException { } public void testEnableStore() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("store", true))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("store", true))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -320,14 +332,14 @@ public void testEnableStore() throws IOException { } public void testDisableIndex() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("index", false))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("index", false))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(0, fields.length); } public void testDisableNorms() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("norms", false))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("norms", false))); ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); @@ -343,7 +355,7 @@ public void testIndexOptions() throws IOException { XContentBuilder mapping = MediaTypeRegistry.JSON.contentBuilder().startObject().startObject("_doc").startObject("properties"); for (String option : supportedOptions.keySet()) { - mapping.startObject(option).field("type", "text").field("index_options", option).endObject(); + mapping.startObject(option).field("type", textFieldName).field("index_options", 
option).endObject(); } mapping.endObject().endObject().endObject(); @@ -389,7 +401,7 @@ public void testDefaultPositionIncrementGap() throws IOException { public void testPositionIncrementGap() throws IOException { final int positionIncrementGap = randomIntBetween(1, 1000); MapperService mapperService = createMapperService( - fieldMapping(b -> b.field("type", "text").field("position_increment_gap", positionIncrementGap)) + fieldMapping(b -> b.field("type", textFieldName).field("position_increment_gap", positionIncrementGap)) ); ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b" }))); @@ -409,16 +421,16 @@ public void testPositionIncrementGap() throws IOException { public void testSearchAnalyzerSerialization() throws IOException { XContentBuilder mapping = fieldMapping( - b -> b.field("type", "text").field("analyzer", "standard").field("search_analyzer", "keyword") + b -> b.field("type", textFieldName).field("analyzer", "standard").field("search_analyzer", "keyword") ); assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); // special case: default index analyzer - mapping = fieldMapping(b -> b.field("type", "text").field("analyzer", "default").field("search_analyzer", "keyword")); + mapping = fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "default").field("search_analyzer", "keyword")); assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); // special case: default search analyzer - mapping = fieldMapping(b -> b.field("type", "text").field("analyzer", "keyword").field("search_analyzer", "default")); + mapping = fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "keyword").field("search_analyzer", "default")); assertEquals(mapping.toString(), createDocumentMapper(mapping).mappingSource().toString()); XContentBuilder builder = MediaTypeRegistry.JSON.contentBuilder(); @@ -436,7 +448,7 @@ public void testSearchAnalyzerSerialization() throws IOException { public void testSearchQuoteAnalyzerSerialization() throws IOException { XContentBuilder mapping = fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .field("search_analyzer", "standard") .field("search_quote_analyzer", "keyword") @@ -445,7 +457,7 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { // special case: default index/search analyzer mapping = fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "default") .field("search_analyzer", "default") .field("search_quote_analyzer", "keyword") @@ -456,27 +468,27 @@ public void testSearchQuoteAnalyzerSerialization() throws IOException { public void testTermVectors() throws IOException { XContentBuilder mapping = mapping( b -> b.startObject("field1") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "no") .endObject() .startObject("field2") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "yes") .endObject() .startObject("field3") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_offsets") .endObject() .startObject("field4") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions") .endObject() .startObject("field5") - .field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions_offsets") .endObject() .startObject("field6") - 
.field("type", "text") + .field("type", textFieldName) .field("term_vector", "with_positions_offsets_payloads") .endObject() ); @@ -526,7 +538,9 @@ public void testTermVectors() throws IOException { } public void testEagerGlobalOrdinals() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("eager_global_ordinals", true))); + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", textFieldName).field("eager_global_ordinals", true)) + ); FieldMapper fieldMapper = (FieldMapper) mapper.mappers().getMapper("field"); assertTrue(fieldMapper.fieldType().eagerGlobalOrdinals()); @@ -539,13 +553,13 @@ public void testFielddata() throws IOException { })); assertThat(e.getMessage(), containsString("Text fields are not optimised for operations that require per-document field data")); - MapperService enabledMapper = createMapperService(fieldMapping(b -> b.field("type", "text").field("fielddata", true))); + MapperService enabledMapper = createMapperService(fieldMapping(b -> b.field("type", textFieldName).field("fielddata", true))); enabledMapper.fieldType("field").fielddataBuilder("test", () -> { throw new UnsupportedOperationException(); }); // no exception // this time e = expectThrows( MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "text").field("index", false).field("fielddata", true))) + () -> createMapperService(fieldMapping(b -> b.field("type", textFieldName).field("index", false).field("fielddata", true))) ); assertThat(e.getMessage(), containsString("Cannot enable fielddata on a [text] field that is not indexed")); } @@ -553,7 +567,7 @@ public void testFielddata() throws IOException { public void testFrequencyFilter() throws IOException { MapperService mapperService = createMapperService( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("fielddata", true) .startObject("fielddata_frequency_filter") .field("min", 2d) @@ -571,17 +585,22 @@ public void testFrequencyFilter() throws IOException { public void testNullConfigValuesFail() throws MapperParsingException { Exception e = expectThrows( MapperParsingException.class, - () -> createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("analyzer", (String) null))) + () -> createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).field("analyzer", (String) null))) + ); + assertThat( + e.getMessage(), + containsString("[analyzer] on mapper [field] of type [" + textFieldName + "] must not have a [null] value") ); - assertThat(e.getMessage(), containsString("[analyzer] on mapper [field] of type [text] must not have a [null] value")); } public void testNotIndexedFieldPositionIncrement() { Exception e = expectThrows( MapperParsingException.class, - () -> createDocumentMapper(fieldMapping(b -> b.field("type", "text").field("index", false).field("position_increment_gap", 10))) + () -> createDocumentMapper( + fieldMapping(b -> b.field("type", textFieldName).field("index", false).field("position_increment_gap", 10)) + ) ); - assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled")); + assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field]")); } public void testAnalyzedFieldPositionIncrementWithoutPositions() { @@ -589,7 +608,9 @@ public void testAnalyzedFieldPositionIncrementWithoutPositions() { Exception e = expectThrows( MapperParsingException.class, () -> 
createDocumentMapper( - fieldMapping(b -> b.field("type", "text").field("index_options", indexOptions).field("position_increment_gap", 10)) + fieldMapping( + b -> b.field("type", textFieldName).field("index_options", indexOptions).field("position_increment_gap", 10) + ) ) ); assertThat(e.getMessage(), containsString("Cannot set position_increment_gap on field [field] without positions enabled")); @@ -600,7 +621,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -615,7 +636,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -632,7 +653,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -649,7 +670,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -666,7 +687,7 @@ public void testIndexPrefixIndexTypes() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .endObject() @@ -682,62 +703,18 @@ public void testIndexPrefixIndexTypes() throws IOException { } public void testNestedIndexPrefixes() throws IOException { - { - MapperService mapperService = createMapperService( - mapping( - b -> b.startObject("object") - .field("type", "object") - .startObject("properties") - .startObject("field") - .field("type", "text") - .startObject("index_prefixes") - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - MappedFieldType textField = mapperService.fieldType("object.field"); - assertNotNull(textField); - assertThat(textField, instanceOf(TextFieldType.class)); - MappedFieldType prefix = ((TextFieldType) textField).getPrefixFieldType(); - assertEquals(prefix.name(), "object.field._index_prefix"); - FieldMapper mapper = (FieldMapper) mapperService.documentMapper().mappers().getMapper("object.field._index_prefix"); - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, mapper.fieldType.indexOptions()); - assertFalse(mapper.fieldType.storeTermVectorOffsets()); - } - { - MapperService mapperService = createMapperService( - mapping( - b -> b.startObject("body") - .field("type", "text") - .startObject("fields") - .startObject("with_prefix") - .field("type", "text") - .startObject("index_prefixes") - .endObject() - .endObject() - .endObject() - .endObject() - ) - ); - MappedFieldType textField = mapperService.fieldType("body.with_prefix"); - assertNotNull(textField); - assertThat(textField, instanceOf(TextFieldType.class)); - MappedFieldType prefix = ((TextFieldType) textField).getPrefixFieldType(); - assertEquals(prefix.name(), "body.with_prefix._index_prefix"); - FieldMapper mapper = (FieldMapper) 
mapperService.documentMapper().mappers().getMapper("body.with_prefix._index_prefix"); - assertEquals(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS, mapper.fieldType.indexOptions()); - assertFalse(mapper.fieldType.storeTermVectorOffsets()); - } } public void testFastPhraseMapping() throws IOException { MapperService mapperService = createMapperService(mapping(b -> { - b.startObject("field").field("type", "text").field("analyzer", "my_stop_analyzer").field("index_phrases", true).endObject(); + b.startObject("field") + .field("type", textFieldName) + .field("analyzer", "my_stop_analyzer") + .field("index_phrases", true) + .endObject(); // "standard" will be replaced with MockSynonymAnalyzer - b.startObject("synfield").field("type", "text").field("analyzer", "standard").field("index_phrases", true).endObject(); + b.startObject("synfield").field("type", textFieldName).field("analyzer", "standard").field("index_phrases", true).endObject(); })); QueryShardContext queryShardContext = createQueryShardContext(mapperService); @@ -808,14 +785,16 @@ protected TokenStreamComponents createComponents(String fieldName) { Exception e = expectThrows( MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "text").field("index", "false").field("index_phrases", true))) + () -> createMapperService( + fieldMapping(b -> b.field("type", textFieldName).field("index", "false").field("index_phrases", true)) + ) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on unindexed field [field]")); e = expectThrows( MapperParsingException.class, () -> createMapperService( - fieldMapping(b -> b.field("type", "text").field("index_options", "freqs").field("index_phrases", true)) + fieldMapping(b -> b.field("type", textFieldName).field("index_options", "freqs").field("index_phrases", true)) ) ); assertThat(e.getMessage(), containsString("Cannot set index_phrases on field [field] if positions are not enabled")); @@ -826,7 +805,7 @@ public void testIndexPrefixMapping() throws IOException { { DocumentMapper mapper = createDocumentMapper( fieldMapping( - b -> b.field("type", "text") + b -> b.field("type", textFieldName) .field("analyzer", "standard") .startObject("index_prefixes") .field("min_chars", 2) @@ -844,29 +823,29 @@ public void testIndexPrefixMapping() throws IOException { { DocumentMapper mapper = createDocumentMapper( - fieldMapping(b -> b.field("type", "text").field("analyzer", "standard").startObject("index_prefixes").endObject()) + fieldMapping(b -> b.field("type", textFieldName).field("analyzer", "standard").startObject("index_prefixes").endObject()) ); assertThat(mapper.mappers().getMapper("field._index_prefix").toString(), containsString("prefixChars=2:5")); } { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "text").nullField("index_prefixes"))); + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", textFieldName).nullField("index_prefixes"))); assertNull(mapper.mappers().getMapper("field._index_prefix")); } { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 1).field("max_chars", 10).endObject(); - b.startObject("fields").startObject("_index_prefix").field("type", "text").endObject().endObject(); + b.startObject("fields").startObject("_index_prefix").field("type", 
textFieldName).endObject().endObject(); }))); assertThat(e.getMessage(), containsString("Field [field._index_prefix] is defined more than once")); } { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 11).field("max_chars", 10).endObject(); }))); assertThat(e.getMessage(), containsString("min_chars [11] must be less than max_chars [10]")); @@ -874,7 +853,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 0).field("max_chars", 10).endObject(); }))); assertThat(e.getMessage(), containsString("min_chars [0] must be greater than zero")); @@ -882,7 +861,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard"); + b.field("type", textFieldName).field("analyzer", "standard"); b.startObject("index_prefixes").field("min_chars", 1).field("max_chars", 25).endObject(); }))); assertThat(e.getMessage(), containsString("max_chars [25] must be less than 20")); @@ -890,7 +869,7 @@ public void testIndexPrefixMapping() throws IOException { { MapperParsingException e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { - b.field("type", "text").field("analyzer", "standard").field("index", false); + b.field("type", textFieldName).field("analyzer", "standard").field("index", false); b.startObject("index_prefixes").endObject(); }))); assertThat(e.getMessage(), containsString("Cannot set index_prefixes on unindexed field [field]")); @@ -901,14 +880,14 @@ public void testFastPhrasePrefixes() throws IOException { MapperService mapperService = createMapperService(mapping(b -> { b.startObject("field"); { - b.field("type", "text"); + b.field("type", textFieldName); b.field("analyzer", "my_stop_analyzer"); b.startObject("index_prefixes").field("min_chars", 2).field("max_chars", 10).endObject(); } b.endObject(); b.startObject("synfield"); { - b.field("type", "text"); + b.field("type", textFieldName); b.field("analyzer", "standard"); // "standard" will be replaced with MockSynonymAnalyzer b.field("index_phrases", true); b.startObject("index_prefixes").field("min_chars", 2).field("max_chars", 10).endObject(); @@ -999,7 +978,7 @@ public void testFastPhrasePrefixes() throws IOException { public void testSimpleMerge() throws IOException { XContentBuilder startingMapping = fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", true) + b -> b.field("type", textFieldName).startObject("index_prefixes").endObject().field("index_phrases", true) ); MapperService mapperService = createMapperService(startingMapping); assertThat(mapperService.documentMapper().mappers().getMapper("field"), instanceOf(TextFieldMapper.class)); @@ -1008,19 +987,28 @@ public void testSimpleMerge() throws IOException { assertThat(mapperService.documentMapper().mappers().getMapper("field"), instanceOf(TextFieldMapper.class)); XContentBuilder differentPrefix = 
fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").field("min_chars", "3").endObject().field("index_phrases", true) + b -> b.field("type", textFieldName) + .startObject("index_prefixes") + .field("min_chars", "3") + .endObject() + .field("index_phrases", true) ); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPrefix)); assertThat(e.getMessage(), containsString("Cannot update parameter [index_prefixes]")); XContentBuilder differentPhrases = fieldMapping( - b -> b.field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", false) + b -> b.field("type", textFieldName).startObject("index_prefixes").endObject().field("index_phrases", false) ); e = expectThrows(IllegalArgumentException.class, () -> merge(mapperService, differentPhrases)); assertThat(e.getMessage(), containsString("Cannot update parameter [index_phrases]")); XContentBuilder newField = mapping(b -> { - b.startObject("field").field("type", "text").startObject("index_prefixes").endObject().field("index_phrases", true).endObject(); + b.startObject("field") + .field("type", textFieldName) + .startObject("index_prefixes") + .endObject() + .field("index_phrases", true) + .endObject(); b.startObject("other_field").field("type", "keyword").endObject(); }); merge(mapperService, newField); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java index 0592a972db5e9..9c177bbec61fd 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java @@ -66,35 +66,39 @@ public class TextFieldTypeTests extends FieldTypeTestCase { - private static TextFieldType createFieldType() { - return new TextFieldType("field"); + TextFieldType createFieldType(boolean searchabe) { + if (searchabe) { + return new TextFieldType("field"); + } else { + return new TextFieldType("field", false, false, Collections.emptyMap()); + } } public void testIsAggregatableDependsOnFieldData() { - TextFieldType ft = createFieldType(); + TextFieldType ft = createFieldType(true); assertFalse(ft.isAggregatable()); ft.setFielddata(true); assertTrue(ft.isAggregatable()); } public void testTermQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null)); assertEquals(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo")), ft.termQueryCaseInsensitive("fOo", null)); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); } public void testTermsQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); List terms = new ArrayList<>(); terms.add(new BytesRef("foo")); terms.add(new BytesRef("bar")); assertEquals(new TermInSetQuery("field", terms), ft.termsQuery(Arrays.asList("foo", "bar"), null)); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( 
IllegalArgumentException.class, () -> unsearchable.termsQuery(Arrays.asList("foo", "bar"), null) @@ -103,7 +107,7 @@ public void testTermsQuery() { } public void testRangeQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new TermRangeQuery("field", BytesRefs.toBytesRef("foo"), BytesRefs.toBytesRef("bar"), true, false), ft.rangeQuery("foo", "bar", true, false, null, null, null, MOCK_QSC) @@ -120,13 +124,13 @@ public void testRangeQuery() { } public void testRegexpQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new RegexpQuery(new Term("field", "foo.*")), ft.regexpQuery("foo.*", 0, 0, 10, CONSTANT_SCORE_BLENDED_REWRITE, MOCK_QSC) ); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.regexpQuery("foo.*", 0, 0, 10, null, MOCK_QSC) @@ -141,13 +145,13 @@ public void testRegexpQuery() { } public void testFuzzyQuery() { - MappedFieldType ft = createFieldType(); + MappedFieldType ft = createFieldType(true); assertEquals( new FuzzyQuery(new Term("field", "foo"), 2, 1, 50, true), ft.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) ); - MappedFieldType unsearchable = new TextFieldType("field", false, false, Collections.emptyMap()); + MappedFieldType unsearchable = createFieldType(false); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> unsearchable.fuzzyQuery("foo", Fuzziness.fromEdits(2), 1, 50, true, MOCK_QSC) @@ -162,7 +166,7 @@ public void testFuzzyQuery() { } public void testIndexPrefixes() { - TextFieldType ft = createFieldType(); + TextFieldType ft = createFieldType(true); ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10)); Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, false, randomMockShardContext()); @@ -222,7 +226,7 @@ public void testIndexPrefixes() { } public void testFetchSourceValue() throws IOException { - TextFieldType fieldType = createFieldType(); + TextFieldType fieldType = createFieldType(true); fieldType.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); assertEquals(List.of("value"), fetchSourceValue(fieldType, "value")); diff --git a/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java b/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java new file mode 100644 index 0000000000000..6af717a97b328 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/SourceFieldMatchQueryTests.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.store.Directory; +import org.opensearch.core.index.Index; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MapperServiceTestCase; +import org.opensearch.index.mapper.ParsedDocument; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; + +import static org.mockito.Mockito.when; + +public class SourceFieldMatchQueryTests extends MapperServiceTestCase { + + public void testAllPossibleScenarios() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("dessert"); + { + b.field("type", "match_only_text"); + } + b.endObject(); + })); + + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert")); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + + String[] desserts = new String[] { "apple pie pie", "banana split pie", "chocolate cake" }; + List docs = new ArrayList<>(); + for (String dessert : desserts) { + docs.add(mapperService.documentMapper().parse(source(b -> b.field("dessert", dessert)))); + } + SourceFieldMatchQuery matchBoth = new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + + SourceFieldMatchQuery matchDelegate = new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "juice").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + + SourceFieldMatchQuery matchFilter = new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "tart").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + + SourceFieldMatchQuery matchNone = new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "gulab").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "jamun").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + + SourceFieldMatchQuery matchMultipleDocs = new SourceFieldMatchQuery( + QueryBuilders.matchAllQuery().toQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(mapperService.indexAnalyzer())); + for (ParsedDocument d : docs) { + iw.addDocument(d.rootDoc()); + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(matchBoth, 10); 
+ assertEquals(topDocs.totalHits.value, 1); + assertEquals(topDocs.scoreDocs[0].doc, 0); + + topDocs = searcher.search(matchDelegate, 10); + assertEquals(topDocs.totalHits.value, 0); + + topDocs = searcher.search(matchFilter, 10); + assertEquals(topDocs.totalHits.value, 0); + + topDocs = searcher.search(matchNone, 10); + assertEquals(topDocs.totalHits.value, 0); + + topDocs = searcher.search(matchMultipleDocs, 10); + assertEquals(topDocs.totalHits.value, 2); + // assert constant score + for (ScoreDoc scoreDoc : topDocs.scoreDocs) { + assertEquals(scoreDoc.score, 1.0, 0.00000000001); + } + } + } + } + + public void testSourceDisabled() throws IOException { + MapperService mapperService = createMapperService(topMapping(b -> b.startObject("_source").field("enabled", false).endObject())); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert")); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("dessert", "pie").doToQuery(queryShardContext), // Filter query + queryShardContext.getFieldType("dessert"), + queryShardContext + ) + ); + assertEquals( + "SourceFieldMatchQuery error: unable to fetch fields from _source field: " + + "_source is disabled in the mappings for index [test_index]", + e.getMessage() + ); + } + + public void testMissingField() throws IOException { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("dessert"); + { + b.field("type", "match_only_text"); + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + when(queryShardContext.sourcePath("dessert")).thenReturn(Set.of("dessert")); + when(queryShardContext.index()).thenReturn(new Index("test_index", "uuid")); + + String[] desserts = new String[] { "apple pie pie", "banana split pie", "chocolate cake" }; + List docs = new ArrayList<>(); + for (String dessert : desserts) { + docs.add(mapperService.documentMapper().parse(source(b -> b.field("dessert", dessert)))); + } + SourceFieldMatchQuery matchDelegate = new SourceFieldMatchQuery( + QueryBuilders.matchQuery("dessert", "apple").doToQuery(queryShardContext), // Delegate query + QueryBuilders.matchQuery("username", "pie").doToQuery(queryShardContext), // Filter query missing field + queryShardContext.getFieldType("dessert"), + queryShardContext + ); + try (Directory dir = newDirectory()) { + IndexWriter iw = new IndexWriter(dir, new IndexWriterConfig(mapperService.indexAnalyzer())); + for (ParsedDocument d : docs) { + iw.addDocument(d.rootDoc()); + } + try (IndexReader reader = DirectoryReader.open(iw)) { + iw.close(); + IndexSearcher searcher = new IndexSearcher(reader); + TopDocs topDocs = searcher.search(matchDelegate, 10); + assertEquals(topDocs.totalHits.value, 0); + } + } + } +} diff --git a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java index 94c2e4ef7da62..ac78a0d1936ea 100644 --- a/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/mapper/MapperServiceTestCase.java @@ -239,7 +239,7 @@ protected final XContentBuilder 
fieldMapping(CheckedConsumer mapperService.fieldType(inv.getArguments()[0].toString())); @@ -254,6 +254,8 @@ QueryShardContext createQueryShardContext(MapperService mapperService) { when(queryShardContext.lookup()).thenReturn(new SearchLookup(mapperService, (ft, s) -> { throw new UnsupportedOperationException("search lookup not available"); })); + when(queryShardContext.getFieldType(any())).thenAnswer(inv -> mapperService.fieldType(inv.getArguments()[0].toString())); + when(queryShardContext.documentMapper(anyString())).thenReturn(mapperService.documentMapper()); return queryShardContext; } } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 82f15a590bea6..ac0447dbebf7e 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -103,6 +103,7 @@ import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.Mapper.BuilderContext; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.mapper.ObjectMapper; import org.opensearch.index.mapper.ObjectMapper.Nested; @@ -760,7 +761,8 @@ public void testSupportedFieldTypes() throws IOException { source.put("type", mappedType.getKey()); // Text is the only field that doesn't support DVs, instead FD - if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false) { + if (mappedType.getKey().equals(TextFieldMapper.CONTENT_TYPE) == false + && mappedType.getKey().equals(MatchOnlyTextFieldMapper.CONTENT_TYPE) == false) { source.put("doc_values", "true"); } From bb3959d75c729c9213374db4109c628d1774491c Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Wed, 3 Jan 2024 04:23:11 -0800 Subject: [PATCH 27/81] Fix flaky testTerminateAfterEarlyTermination (#11683) Signed-off-by: Ticheng Lin --- .../search/query/QueryProfilePhaseTests.java | 74 +++++++++++++------ 1 file changed, 52 insertions(+), 22 deletions(-) diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index ba1600e6eb651..48ac2d3b5a804 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -615,12 +615,22 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getTimeBreakdown().get("score"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("score_count"), greaterThan(0L)); if (executor != null) { - assertThat(query.getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("min_score_count"), greaterThan(0L)); - assertThat(query.getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + long maxScore = query.getTimeBreakdown().get("max_score"); + long minScore = query.getTimeBreakdown().get("min_score"); + long avgScore = query.getTimeBreakdown().get("avg_score"); + long 
maxScoreCount = query.getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThan(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThan(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); } assertThat(query.getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getTimeBreakdown().get("create_weight_count"), equalTo(1L)); @@ -744,30 +754,50 @@ public void testTerminateAfterEarlyTermination() throws Exception { assertThat(query.getProfiledChildren().get(0).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score"), greaterThanOrEqualTo(0L)); + assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(0L)); if (executor != null) { - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + long maxScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score"); + long minScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score"); + long avgScore = query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getProfiledChildren().get(0).getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThanOrEqualTo(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThanOrEqualTo(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); } 
assertThat(query.getProfiledChildren().get(1).getQueryName(), equalTo("TermQuery")); assertThat(query.getProfiledChildren().get(1).getTime(), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight"), greaterThan(0L)); assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("create_weight_count"), equalTo(1L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThan(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score"), greaterThanOrEqualTo(0L)); + assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("score_count"), greaterThanOrEqualTo(0L)); if (executor != null) { - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score_count"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score_count"), greaterThan(0L)); - assertThat(query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score_count"), greaterThan(0L)); + long maxScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score"); + long minScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score"); + long avgScore = query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score"); + long maxScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("max_score_count"); + long minScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("min_score_count"); + long avgScoreCount = query.getProfiledChildren().get(1).getTimeBreakdown().get("avg_score_count"); + assertThat(maxScore, greaterThanOrEqualTo(0L)); + assertThat(minScore, greaterThanOrEqualTo(0L)); + assertThat(avgScore, greaterThanOrEqualTo(0L)); + assertThat(maxScore, greaterThanOrEqualTo(avgScore)); + assertThat(avgScore, greaterThanOrEqualTo(minScore)); + assertThat(maxScoreCount, greaterThanOrEqualTo(0L)); + assertThat(minScoreCount, greaterThanOrEqualTo(0L)); + assertThat(avgScoreCount, greaterThanOrEqualTo(0L)); + assertThat(maxScoreCount, greaterThanOrEqualTo(avgScoreCount)); + assertThat(avgScoreCount, greaterThanOrEqualTo(minScoreCount)); } }, collector -> { assertThat(collector.getReason(), equalTo("search_terminate_after_count")); From 84404689d6259b317c3eaaf94895f369e9920434 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 3 Jan 2024 11:14:52 -0800 Subject: [PATCH 28/81] Fix testIndexDeletionDuringSnapshotCreationInQueue flaky test (#11726) * Fix testIndexDeletionDuringSnapshotCreationInQueue flaky test Signed-off-by: Suraj Singh * Update comment Signed-off-by: Suraj Singh --------- Signed-off-by: Suraj Singh --- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index bc591de45dd86..7a52c8aa5018e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -1457,6 +1457,13 @@ public void testIndexDeletionDuringSnapshotCreationInQueue() throws Exception { clusterAdmin().prepareRestoreSnapshot("test-repo", "test-snap").get(); ensureGreen("test-idx"); + + // Wait for snapshot process to complete to prevent conflict with repository clean up + assertBusy(() -> { + SnapshotInfo snapshotInfo = getSnapshot("test-repo", "test-snap-2"); + assertTrue(snapshotInfo.state().completed()); + assertEquals(SnapshotState.PARTIAL, snapshotInfo.state()); + }, 1, TimeUnit.MINUTES); } private long calculateTotalFilesSize(List files) { From 16d457d3f018893f6268a9ffa52d3d3f73f1a87a Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Wed, 3 Jan 2024 15:28:45 -0600 Subject: [PATCH 29/81] Switch to more reliable OpenSearch Lucene snapshot location (#11728) * Switched to more reliable OpenSearch Lucene snapshot location - Related https://github.com/opensearch-project/opensearch-build/issues/3874 Signed-off-by: Peter Nied Signed-off-by: Peter Nied * Changelog entry Signed-off-by: Peter Nied Signed-off-by: Peter Nied --------- Signed-off-by: Peter Nied Signed-off-by: Peter Nied --- CHANGELOG.md | 2 +- .../java/org/opensearch/gradle/RepositoriesSetupPlugin.java | 2 +- gradle/code-coverage.gradle | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 026606ff57d65..f5191c5e04a41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,7 +57,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Return 409 Conflict HTTP status instead of 503 on failure to concurrently execute snapshots ([#8986](https://github.com/opensearch-project/OpenSearch/pull/5855)) - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) - +- Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) ### Deprecated diff --git a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java index 63b88f671c84c..8ecfbf40b6c62 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/RepositoriesSetupPlugin.java @@ -94,7 +94,7 @@ public static void configureRepositories(Project project) { String revision = matcher.group(1); MavenArtifactRepository luceneRepo = repos.maven(repo -> { repo.setName("lucene-snapshots"); - repo.setUrl("https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/"); + repo.setUrl("https://artifacts.opensearch.org/snapshots/lucene/"); }); repos.exclusiveContent(exclusiveRepo -> { exclusiveRepo.filter( diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index dfb4ddba24113..822b471e2e034 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -13,7 +13,7 @@ repositories { gradlePluginPortal() // TODO: Find the way to use the repositories from RepositoriesSetupPlugin maven { - url = "https://d1nvenhzbhpy0q.cloudfront.net/snapshots/lucene/" + url = "https://artifacts.opensearch.org/snapshots/lucene/" } } From 36bd67465a12bbc56631f49a617429529a8365f9 Mon Sep 17 00:00:00 2001 From: rayshrey 
<121871912+rayshrey@users.noreply.github.com> Date: Thu, 4 Jan 2024 18:59:18 +0530 Subject: [PATCH 30/81] Add deleted doc count in _cat/shards (#11678) Signed-off-by: Shreyansh Ray --- CHANGELOG.md | 1 + .../test/cat.shards/10_basic.yml | 94 ++++++++++++++++++- .../rest/action/cat/RestShardsAction.java | 2 + .../action/cat/RestShardsActionTests.java | 10 +- 4 files changed, 104 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5191c5e04a41..aa2086646fbac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -181,6 +181,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) - Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) - Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) +- Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) ### Deprecated diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml index f80c9f9c0bc80..b572ed9e62ea9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yml @@ -1,13 +1,103 @@ "Help": - skip: - version: " - 2.3.99" + version: " - 2.11.99" + reason: deleted docs added in 2.12.0 + features: node_selector + - do: + cat.shards: + help: true + node_selector: + version: "2.12.0 - " + + - match: + $body: | + /^ index .+ \n + shard .+ \n + prirep .+ \n + state .+ \n + docs .+ \n + store .+ \n + ip .+ \n + id .+ \n + node .+ \n + sync_id .+ \n + unassigned.reason .+ \n + unassigned.at .+ \n + unassigned.for .+ \n + unassigned.details .+ \n + recoverysource.type .+ \n + completion.size .+ \n + fielddata.memory_size .+ \n + fielddata.evictions .+ \n + query_cache.memory_size .+ \n + query_cache.evictions .+ \n + flush.total .+ \n + flush.total_time .+ \n + get.current .+ \n + get.time .+ \n + get.total .+ \n + get.exists_time .+ \n + get.exists_total .+ \n + get.missing_time .+ \n + get.missing_total .+ \n + indexing.delete_current .+ \n + indexing.delete_time .+ \n + indexing.delete_total .+ \n + indexing.index_current .+ \n + indexing.index_time .+ \n + indexing.index_total .+ \n + indexing.index_failed .+ \n + merges.current .+ \n + merges.current_docs .+ \n + merges.current_size .+ \n + merges.total .+ \n + merges.total_docs .+ \n + merges.total_size .+ \n + merges.total_time .+ \n + refresh.total .+ \n + refresh.time .+ \n + refresh.external_total .+ \n + refresh.external_time .+ \n + refresh.listeners .+ \n + search.fetch_current .+ \n + search.fetch_time .+ \n + search.fetch_total .+ \n + search.open_contexts .+ \n + search.query_current .+ \n + search.query_time .+ \n + search.query_total .+ \n + search.scroll_current .+ \n + search.scroll_time .+ \n + search.scroll_total .+ \n + search.point_in_time_current .+ \n + search.point_in_time_time .+ \n + search.point_in_time_total .+ \n + segments.count .+ \n + segments.memory .+ \n + segments.index_writer_memory .+ \n + segments.version_map_memory .+ \n + segments.fixed_bitset_memory .+ \n + seq_no.max .+ \n + seq_no.local_checkpoint .+ \n + seq_no.global_checkpoint .+ \n + warmer.current .+ \n + warmer.total .+ \n + 
warmer.total_time .+ \n + path.data .+ \n + path.state .+ \n + docs.deleted .+ \n + $/ +--- +"Help from 2.4.0 to 2.11.0": + - skip: + version: " - 2.3.99 , 2.12.0 - " reason: point in time stats were added in 2.4.0 features: node_selector - do: cat.shards: help: true node_selector: - version: "2.4.0 - " + version: "2.4.0 - 2.11.99" - match: $body: | diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java index 5d5f55c7f4639..d0d00e4c4596a 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestShardsAction.java @@ -283,6 +283,7 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("path.data", "alias:pd,dataPath;default:false;text-align:right;desc:shard data path"); table.addCell("path.state", "alias:ps,statsPath;default:false;text-align:right;desc:shard state path"); + table.addCell("docs.deleted", "alias:dd,docsDeleted;default:false;text-align:right;desc:number of deleted docs in shard"); table.endHeaders(); return table; @@ -448,6 +449,7 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe table.addCell(getOrNull(shardStats, ShardStats::getDataPath, s -> s)); table.addCell(getOrNull(shardStats, ShardStats::getStatePath, s -> s)); + table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getDeleted)); table.endRow(); } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java index a8679a087216d..73f83a5642bb4 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestShardsActionTests.java @@ -46,6 +46,7 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.Table; +import org.opensearch.index.shard.DocsStats; import org.opensearch.index.shard.ShardPath; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; @@ -65,6 +66,8 @@ public class RestShardsActionTests extends OpenSearchTestCase { public void testBuildTable() { final int numShards = randomIntBetween(1, 5); + long numDocs = randomLongBetween(0, 10000); + long numDeletedDocs = randomLongBetween(0, 100); DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); List shardRoutings = new ArrayList<>(numShards); @@ -76,10 +79,12 @@ public void testBuildTable() { Path path = createTempDir().resolve("indices") .resolve(shardRouting.shardId().getIndex().getUUID()) .resolve(String.valueOf(shardRouting.shardId().id())); + CommonStats commonStats = new CommonStats(); + commonStats.docs = new DocsStats(numDocs, numDeletedDocs, 0); ShardStats shardStats = new ShardStats( shardRouting, new ShardPath(false, path, path, shardRouting.shardId()), - null, + commonStats, null, null, null @@ -120,6 +125,7 @@ public void testBuildTable() { assertThat(headers.get(6).value, equalTo("ip")); assertThat(headers.get(7).value, equalTo("id")); assertThat(headers.get(8).value, equalTo("node")); + assertThat(headers.get(74).value, equalTo("docs.deleted")); final List> rows = table.getRows(); assertThat(rows.size(), equalTo(numShards)); @@ -132,10 +138,12 @@ public void testBuildTable() { assertThat(row.get(1).value, 
equalTo(shardRouting.getId())); assertThat(row.get(2).value, equalTo(shardRouting.primary() ? "p" : "r")); assertThat(row.get(3).value, equalTo(shardRouting.state())); + assertThat(row.get(4).value, equalTo(shardStats.getStats().getDocs().getCount())); assertThat(row.get(6).value, equalTo(localNode.getHostAddress())); assertThat(row.get(7).value, equalTo(localNode.getId())); assertThat(row.get(72).value, equalTo(shardStats.getDataPath())); assertThat(row.get(73).value, equalTo(shardStats.getStatePath())); + assertThat(row.get(74).value, equalTo(shardStats.getStats().getDocs().getDeleted())); } } } From f6475152aece1083add15ac6cac31758eb711d9e Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Thu, 4 Jan 2024 21:48:32 +0800 Subject: [PATCH 31/81] Fix simulate remove ingest processor throwing illegal_argument_exception (#11607) * Fix simulate remove ingest processor throwing illegal_argument_exception Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong * Create a new test mothod Signed-off-by: Gao Binlong * Use old method to get field value Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 2 +- .../ingest/common/RemoveProcessor.java | 20 ++-- .../ingest/common/RemoveProcessorTests.java | 97 +++++++++++++++++++ .../test/ingest/290_remove_processor.yml | 92 ++++++++++++++++++ 4 files changed, 201 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aa2086646fbac..9b5f2dc5a16f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -170,7 +170,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add telemetry tracer/metric enable flag and integ test. ([#10395](https://github.com/opensearch-project/OpenSearch/pull/10395)) - Performance improvement for Datetime field caching ([#4558](https://github.com/opensearch-project/OpenSearch/issues/4558)) - Add instrumentation for indexing in transport bulk action and transport shard bulk action. 
([#10273](https://github.com/opensearch-project/OpenSearch/pull/10273)) -- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895)) +- Disallow removing some metadata fields by remove ingest processor ([#10895](https://github.com/opensearch-project/OpenSearch/pull/10895), [#11607](https://github.com/opensearch-project/OpenSearch/pull/11607)) - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) - Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index bb3d4bca47859..a48cfd87b78c3 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -88,16 +88,18 @@ public IngestDocument execute(IngestDocument document) { throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); } // removing _id is disallowed when there's an external version specified in the request - String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); if (path.equals(IngestDocument.Metadata.ID.getFieldName()) - && !Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { - Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class); - throw new IllegalArgumentException( - "cannot remove metadata field [_id] when specifying external version for the document, version: " - + version - + ", version_type: " - + versionType - ); + && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { + Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); + throw new IllegalArgumentException( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType + ); + } } document.removeField(path); }); diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index 1a5630a4730f2..c138ad606d2e5 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.lucene.uid.Versions; import org.opensearch.index.VersionType; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; @@ -181,4 +182,100 @@ public void testRemoveMetadataField() throws Exception { } } } + + public void testRemoveDocumentId() throws Exception { + Map config = new HashMap<>(); + 
config.put("field", IngestDocument.Metadata.ID.getFieldName()); + String processorTag = randomAlphaOfLength(10); + + // test remove _id when _version_type is external + IngestDocument ingestDocumentWithExternalVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + 1L, + VersionType.EXTERNAL, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForExternalVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + 1 + + ", version_type: " + + VersionType.EXTERNAL, + IllegalArgumentException.class, + () -> processorForExternalVersionType.execute(ingestDocumentWithExternalVersionType) + ); + + // test remove _id when _version_type is external_gte + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithExternalGTEVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + 1L, + VersionType.EXTERNAL_GTE, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForExternalGTEVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + assertThrows( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + 1 + + ", version_type: " + + VersionType.EXTERNAL_GTE, + IllegalArgumentException.class, + () -> processorForExternalGTEVersionType.execute(ingestDocumentWithExternalGTEVersionType) + ); + + // test remove _id when _version_type is internal + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithInternalVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + Versions.MATCH_ANY, + VersionType.INTERNAL, + RandomDocumentPicks.randomSource(random()) + ); + + Processor processorForInternalVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + processorForInternalVersionType.execute(ingestDocumentWithInternalVersionType); + assertThat(ingestDocumentWithInternalVersionType.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(false)); + + // test remove _id when _version_type is null + config.put("field", IngestDocument.Metadata.ID.getFieldName()); + IngestDocument ingestDocumentWithNoVersionType = new IngestDocument( + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + RandomDocumentPicks.randomString(random()), + null, + null, + RandomDocumentPicks.randomSource(random()) + ); + Processor processorForNullVersionType = new RemoveProcessor.Factory(TestTemplateService.instance()).create( + null, + processorTag, + null, + config + ); + processorForNullVersionType.execute(ingestDocumentWithNoVersionType); + assertThat(ingestDocumentWithNoVersionType.hasField(IngestDocument.Metadata.ID.getFieldName()), equalTo(false)); + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml 
b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml index 4811769d04f0e..6668b468f8edc 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -5,6 +5,69 @@ teardown: id: "my_pipeline" ignore: 404 +--- +"Test simulate API works well with remove processor": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + } + - match: { acknowledged: true } + + # test simulating existing pipeline works well + - do: + ingest.simulate: + id: "my_pipeline" + body: > + { + "docs": [ + { + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source: { "foo": "bar" } } + + # test simulating inflight pipeline works well + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "{{foo}}" + } + } + ] + }, + "docs": [ + { + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._source: { "foo": "bar" } } + --- "Test remove processor with non-existing field and without ignore_missing": - do: @@ -227,3 +290,32 @@ teardown: version: 1 version_type: "external" body: { message: "foo bar baz" } + + # test simulating pipeline with removing _id + - do: + ingest.simulate: + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "remove" : { + "field" : "_id" + } + } + ] + }, + "docs": [ + { + "_version_type": "external_gte", + "_version": 1, + "_source": { + "foo": "bar", + "bar": "zoo" + } + } + ] + } + - match: { docs.0.error.type: "illegal_argument_exception" } + - match: { docs.0.error.reason: "cannot remove metadata field [_id] when specifying external version for the document, version: 1, version_type: external_gte" } From 178a7a073e1236d552f697e7f67d32f04a7fe8ec Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Thu, 4 Jan 2024 10:12:35 -0800 Subject: [PATCH 32/81] Fixes Numeric exact match queries to use range queries internally (#11209) * Updating numeric term and terms queries to use IODVQ Signed-off-by: Harsha Vamsi Kalluri * Addressing comments Signed-off-by: Harsha Vamsi Kalluri * Fix formatting Signed-off-by: Harsha Vamsi Kalluri * Fix changelog Signed-off-by: Harsha Vamsi Kalluri * Addressing more comments + adding tests Signed-off-by: Harsha Vamsi Kalluri * renaming yaml test Signed-off-by: Harsha Vamsi Kalluri * Adding skip for bwc Signed-off-by: Harsha Vamsi Kalluri * Adding new SortedUnsignedLongDocValuesSetQuery to allow for BitInteger Terms query Signed-off-by: Harsha Vamsi Kalluri * Fixing some tests Signed-off-by: Harsha Vamsi Kalluri * Remove duplicate skip Signed-off-by: Harsha Vamsi Kalluri * Remove unused points declaration Signed-off-by: Harsha Vamsi Kalluri * Change unsigned exact query to be consistent Signed-off-by: Harsha Vamsi Kalluri * Use slowExactQuery from Unsigned Set Query Signed-off-by: Harsha Vamsi Kalluri * Merging different yaml tests into a single test Signed-off-by: Harsha Vamsi Kalluri * Updating test case for main Signed-off-by: Harsha Vamsi Kalluri * Fix changelog Signed-off-by: Harsha Vamsi Kalluri --------- Signed-off-by: Harsha Vamsi Kalluri --- CHANGELOG.md | 1 + .../index/mapper/ScaledFloatFieldMapper.java | 17 +- 
.../mapper/ScaledFloatFieldTypeTests.java | 19 +- .../test/search/340_doc_values_field.yml | 1147 +++++++++++++++++ .../test/search/340_keyword_doc_values.yml | 46 - .../org/apache/lucene/util/LongHashSet.java | 136 ++ .../SortedUnsignedLongDocValuesSetQuery.java | 176 +++ .../index/mapper/NumberFieldMapper.java | 290 ++++- .../index/mapper/NumberFieldTypeTests.java | 90 +- .../index/query/TermQueryBuilderTests.java | 2 + .../index/query/TermsQueryBuilderTests.java | 2 + 11 files changed, 1795 insertions(+), 131 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml create mode 100644 server/src/main/java/org/apache/lucene/util/LongHashSet.java create mode 100644 server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b5f2dc5a16f2..ac15176af56f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -176,6 +176,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) - Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) +- Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) - Change error message when per shard document limit is breached ([#11312](https://github.com/opensearch-project/OpenSearch/pull/11312)) - Improve boolean parsing performance ([#11308](https://github.com/opensearch-project/OpenSearch/pull/11308)) - Interpret byte array as primitive using VarHandles ([#11362](https://github.com/opensearch-project/OpenSearch/pull/11362)) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java index 7be241017f683..400d867296e5f 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/ScaledFloatFieldMapper.java @@ -199,9 +199,9 @@ public String typeName() { @Override public Query termQuery(Object value, QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); long scaledValue = Math.round(scale(value)); - Query query = NumberFieldMapper.NumberType.LONG.termQuery(name(), scaledValue); + Query query = NumberFieldMapper.NumberType.LONG.termQuery(name(), scaledValue, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -210,13 +210,18 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); List scaledValues = new ArrayList<>(values.size()); for (Object value : values) { long scaledValue = Math.round(scale(value)); scaledValues.add(scaledValue); } - Query query = NumberFieldMapper.NumberType.LONG.termsQuery(name(), Collections.unmodifiableList(scaledValues)); + Query query = NumberFieldMapper.NumberType.LONG.termsQuery( + 
name(), + Collections.unmodifiableList(scaledValues), + hasDocValues(), + isSearchable() + ); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -225,7 +230,7 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); + failIfNotIndexedAndNoDocValues(); Long lo = null; if (lowerTerm != null) { double dValue = scale(lowerTerm); @@ -242,7 +247,7 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower } hi = Math.round(Math.floor(dValue)); } - Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), context); + Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), isSearchable(), context); if (boost() != 1f) { query = new BoostQuery(query, boost()); } diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java index be12c49321b87..d83811e6668eb 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -34,11 +34,13 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.LongField; import org.apache.lucene.document.LongPoint; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -63,7 +65,9 @@ public void testTermQuery() { ); double value = (randomDouble() * 2 - 1) * 10000; long scaledValue = Math.round(value * ft.getScalingFactor()); - assertEquals(LongPoint.newExactQuery("scaled_float", scaledValue), ft.termQuery(value, null)); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery("scaled_float", scaledValue); + Query query = new IndexOrDocValuesQuery(LongPoint.newExactQuery("scaled_float", scaledValue), dvQuery); + assertEquals(query, ft.termQuery(value, null)); } public void testTermsQuery() { @@ -75,7 +79,7 @@ public void testTermsQuery() { long scaledValue1 = Math.round(value1 * ft.getScalingFactor()); double value2 = (randomDouble() * 2 - 1) * 10000; long scaledValue2 = Math.round(value2 * ft.getScalingFactor()); - assertEquals(LongPoint.newSetQuery("scaled_float", scaledValue1, scaledValue2), ft.termsQuery(Arrays.asList(value1, value2), null)); + assertEquals(LongField.newSetQuery("scaled_float", scaledValue1, scaledValue2), ft.termsQuery(Arrays.asList(value1, value2), null)); } public void testRangeQuery() throws IOException { @@ -112,7 +116,16 @@ public void testRangeQuery() throws IOException { Double u = randomBoolean() ? 
null : (randomDouble() * 2 - 1) * 10000; boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery( + "double", + l, + u, + includeLower, + includeUpper, + false, + true, + MOCK_QSC + ); Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, MOCK_QSC); assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml new file mode 100644 index 0000000000000..f3281e35ac8e6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -0,0 +1,1147 @@ +setup: + - skip: + features: [ "headers" ] + version: " - 2.99.99" + reason: "searching with only doc_values was added in 3.0.0" +--- +"search on fields with both index and doc_values enabled": + - do: + indices.create: + index: test-iodvq + body: + mappings: + properties: + some_keyword: + type: keyword + index: true + doc_values: true + byte: + type: byte + index: true + doc_values: true + double: + type: double + index: true + doc_values: true + float: + type: float + index: true + doc_values: true + half_float: + type: half_float + index: true + doc_values: true + integer: + type: integer + index: true + doc_values: true + long: + type: long + index: true + doc_values: true + short: + type: short + index: true + doc_values: true + unsigned_long: + type: unsigned_long + index: true + doc_values: true + + - do: + bulk: + index: test-iodvq + refresh: true + body: + - '{"index": {"_index": "test-iodvq", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "index": { "_index": "test-iodvq", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "index": { "_index": "test-iodvq", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true 
+ index: test-iodvq + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + integer: 1291 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-iodvq + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } + +--- +"search on fields with only index enabled": + - do: + indices.create: + index: test-index + body: + mappings: + properties: + some_keyword: + type: keyword + index: true + doc_values: false + byte: + type: byte + index: true + doc_values: false + double: + type: double + index: true + 
doc_values: false + float: + type: float + index: true + doc_values: false + half_float: + type: half_float + index: true + doc_values: false + integer: + type: integer + index: true + doc_values: false + long: + type: long + index: true + doc_values: false + short: + type: short + index: true + doc_values: false + unsigned_long: + type: unsigned_long + index: true + doc_values: false + + - do: + bulk: + index: test-index + refresh: true + body: + - '{"index": {"_index": "test-index", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "index": { "_index": "test-index", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "index": { "_index": "test-index", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + integer: 1291 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + 
rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-index + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } + +--- +"search on fields with only doc_values enabled": + - do: + indices.create: + index: test-doc-values + body: + mappings: + properties: + some_keyword: + type: keyword + index: false + doc_values: true + byte: + type: byte + index: false + doc_values: true + double: + type: double + index: false + doc_values: true + float: + type: float + index: false + doc_values: true + half_float: + type: half_float + index: false + doc_values: true + integer: + type: integer + index: false + doc_values: true + long: + type: long + index: false + doc_values: true + short: + type: short + index: false + doc_values: true + unsigned_long: + type: unsigned_long + index: false + doc_values: true + + - do: + bulk: + index: test-doc-values + refresh: true + body: + - '{"index": {"_index": "test-doc-values", "_id": "1" }}' + - '{ "some_keyword": "ingesting some random keyword data", "byte": 120, "double": 100.0, "float": "800.0", "half_float": "400.0", "integer": 1290, "long": 13456, "short": 150, "unsigned_long": 10223372036854775800 }' + - '{ "index": { "_index": "test-doc-values", "_id": "2" }}' + - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801 }' + - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' + - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802 }' + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + 
query: + prefix: + some_keyword: "ing" + + - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + "some_keyword": { + "lt": 500 + } } + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + half_float: 400.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + float: 800.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + double: 100.0 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + byte: 120 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + short: 150 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + integer: 1291 + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + long: 13456 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: + unsigned_long: 10223372036854775800 + + - match: { hits.total: 1 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + half_float: [ 400.0, 401.0 ] + + - match: { hits.total: 2 } + + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + float: [ 800.0, 801.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + byte: [ 120, 121 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + double: [ 100.0, 101.0 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + short: [ 150, 151 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + integer: [ 1290, 1291 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + long: [ 13456, 13457 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + terms: + unsigned_long: [ 10223372036854775800, 10223372036854775801 ] + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + half_float: { + gte: 401.0, + lte: 402.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + float: { + gte: 801.0, + lte: 802.0 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + byte: { + gte: 120, + lte: 121 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + double: { + gte: 101.0, + lte: 102.0 + }, + } + + - match: { hits.total: 2 } + + - do: + 
search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + short: { + gte: 151, + lte: 152 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + integer: { + gte: 1291, + lte: 1292 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + long: { + gte: 13457, + lte: 13458 + }, + } + + - match: { hits.total: 2 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + range: { + unsigned_long: { + gte: 10223372036854775801, + lte: 10223372036854775802 + }, + } + + - match: { hits.total: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml deleted file mode 100644 index 8829e7b100fdd..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_keyword_doc_values.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -"search on keyword fields with doc_values enabled": - - do: - indices.create: - index: test - body: - mappings: - properties: - "some_keyword": - type: "keyword" - index: true - doc_values: true - - - do: - bulk: - index: test - refresh: true - body: - - '{"index": {"_index": "test", "_id": "1" }}' - - '{ "some_keyword": "ingesting some random keyword data" }' - - '{ "index": { "_index": "test", "_id": "2" }}' - - '{ "some_keyword": "400" }' - - '{ "index": { "_index": "test", "_id": "3" } }' - - '{ "some_keyword": "5" }' - - - do: - search: - index: test - body: - query: - prefix: - some_keyword: "ing" - - - match: { hits.hits.0._source.some_keyword: "ingesting some random keyword data" } - - - do: - search: - index: test - body: - query: - range: { - "some_keyword": { - "lt": 500 - } } - - - match: { hits.total.value: 2 } diff --git a/server/src/main/java/org/apache/lucene/util/LongHashSet.java b/server/src/main/java/org/apache/lucene/util/LongHashSet.java new file mode 100644 index 0000000000000..a463e8a189585 --- /dev/null +++ b/server/src/main/java/org/apache/lucene/util/LongHashSet.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.apache.lucene.util; + +import org.apache.lucene.util.packed.PackedInts; + +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** Set of longs, optimized for docvalues usage */ +public final class LongHashSet implements Accountable { + private static final long BASE_RAM_BYTES = RamUsageEstimator.shallowSizeOfInstance(LongHashSet.class); + + private static final long MISSING = Long.MIN_VALUE; + + final long[] table; + final int mask; + final boolean hasMissingValue; + final int size; + /** minimum value in the set, or Long.MAX_VALUE for an empty set */ + public final long minValue; + /** maximum value in the set, or Long.MIN_VALUE for an empty set */ + public final long maxValue; + + /** Construct a set. Values must be in sorted order. 
*/ + public LongHashSet(long[] values) { + int tableSize = Math.toIntExact(values.length * 3L / 2); + tableSize = 1 << PackedInts.bitsRequired(tableSize); // make it a power of 2 + assert tableSize >= values.length * 3L / 2; + table = new long[tableSize]; + Arrays.fill(table, MISSING); + mask = tableSize - 1; + boolean hasMissingValue = false; + int size = 0; + long previousValue = Long.MIN_VALUE; // for assert + for (long value : values) { + if (value == MISSING) { + size += hasMissingValue ? 0 : 1; + hasMissingValue = true; + } else if (add(value)) { + ++size; + } + assert value >= previousValue : "values must be provided in sorted order"; + previousValue = value; + } + this.hasMissingValue = hasMissingValue; + this.size = size; + this.minValue = values.length == 0 ? Long.MAX_VALUE : values[0]; + this.maxValue = values.length == 0 ? Long.MIN_VALUE : values[values.length - 1]; + } + + private boolean add(long l) { + assert l != MISSING; + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + table[i] = l; + return true; + } else if (table[i] == l) { + // already added + return false; + } + } + } + + /** + * check for membership in the set. + * + *
You should use {@link #minValue} and {@link #maxValue} to guide/terminate iteration before + * calling this. + */ + public boolean contains(long l) { + if (l == MISSING) { + return hasMissingValue; + } + final int slot = Long.hashCode(l) & mask; + for (int i = slot;; i = (i + 1) & mask) { + if (table[i] == MISSING) { + return false; + } else if (table[i] == l) { + return true; + } + } + } + + /** returns a stream of all values contained in this set */ + LongStream stream() { + LongStream stream = Arrays.stream(table).filter(v -> v != MISSING); + if (hasMissingValue) { + stream = LongStream.concat(LongStream.of(MISSING), stream); + } + return stream; + } + + @Override + public int hashCode() { + return Objects.hash(size, minValue, maxValue, mask, hasMissingValue, Arrays.hashCode(table)); + } + + @Override + public boolean equals(Object obj) { + if (obj != null && obj instanceof LongHashSet) { + LongHashSet that = (LongHashSet) obj; + return size == that.size + && minValue == that.minValue + && maxValue == that.maxValue + && mask == that.mask + && hasMissingValue == that.hasMissingValue + && Arrays.equals(table, that.table); + } + return false; + } + + @Override + public String toString() { + return stream().mapToObj(String::valueOf).collect(Collectors.joining(", ", "[", "]")); + } + + /** number of elements in the set */ + public int size() { + return size; + } + + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES + RamUsageEstimator.sizeOfObject(table); + } +} diff --git a/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java new file mode 100644 index 0000000000000..669dbb1e1bfc7 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/document/SortedUnsignedLongDocValuesSetQuery.java @@ -0,0 +1,176 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.document; + +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryVisitor; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.LongHashSet; + +import java.io.IOException; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.Objects; + +/** + * The {@link org.apache.lucene.document.SortedNumericDocValuesSetQuery} implementation for unsigned long numeric data type. 
+ * + * @opensearch.internal + */ +public abstract class SortedUnsignedLongDocValuesSetQuery extends Query { + + private final String field; + private final LongHashSet numbers; + + SortedUnsignedLongDocValuesSetQuery(String field, BigInteger[] numbers) { + this.field = Objects.requireNonNull(field); + Arrays.sort(numbers); + this.numbers = new LongHashSet(Arrays.stream(numbers).mapToLong(n -> n.longValue()).toArray()); + } + + @Override + public String toString(String field) { + return new StringBuilder().append(field).append(": ").append(numbers.toString()).toString(); + } + + @Override + public void visit(QueryVisitor visitor) { + if (visitor.acceptField(field)) { + visitor.visitLeaf(this); + } + } + + @Override + public Query rewrite(IndexSearcher indexSearcher) throws IOException { + if (numbers.size() == 0) { + return new MatchNoDocsQuery(); + } + return super.rewrite(indexSearcher); + } + + @Override + public boolean equals(Object other) { + if (sameClassAs(other) == false) { + return false; + } + SortedUnsignedLongDocValuesSetQuery that = (SortedUnsignedLongDocValuesSetQuery) other; + return field.equals(that.field) && numbers.equals(that.numbers); + } + + @Override + public int hashCode() { + return Objects.hash(classHash(), field, numbers); + } + + abstract SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException; + + @Override + public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { + return new ConstantScoreWeight(this, boost) { + + @Override + public boolean isCacheable(LeafReaderContext ctx) { + return DocValues.isCacheable(ctx, field); + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + SortedNumericDocValues values = getValues(context.reader(), field); + if (values == null) { + return null; + } + final NumericDocValues singleton = DocValues.unwrapSingleton(values); + final TwoPhaseIterator iterator; + if (singleton != null) { + iterator = new TwoPhaseIterator(singleton) { + @Override + public boolean matches() throws IOException { + long value = singleton.longValue(); + return Long.compareUnsigned(value, numbers.minValue) >= 0 + && Long.compareUnsigned(value, numbers.maxValue) <= 0 + && numbers.contains(value); + } + + @Override + public float matchCost() { + return 5; // 2 comparisions, possible lookup in the set + } + }; + } else { + iterator = new TwoPhaseIterator(values) { + @Override + public boolean matches() throws IOException { + int count = values.docValueCount(); + for (int i = 0; i < count; i++) { + final long value = values.nextValue(); + if (Long.compareUnsigned(value, numbers.minValue) < 0) { + continue; + } else if (Long.compareUnsigned(value, numbers.maxValue) > 0) { + return false; // values are sorted, terminate + } else if (numbers.contains(value)) { + return true; + } + } + return false; + } + + @Override + public float matchCost() { + return 5; // 2 comparisons, possible lookup in the set + } + }; + } + return new ConstantScoreScorer(this, score(), scoreMode, iterator); + } + }; + } + + public static Query newSlowSetQuery(String field, BigInteger... 
values) { + return new SortedUnsignedLongDocValuesSetQuery(field, values) { + @Override + SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException { + FieldInfo info = reader.getFieldInfos().fieldInfo(field); + if (info == null) { + // Queries have some optimizations when one sub scorer returns null rather + // than a scorer that does not match any documents + return null; + } + return DocValues.getSortedNumeric(reader, field); + } + }; + } + + public static Query newSlowExactQuery(String field, BigInteger value) { + return new SortedUnsignedLongDocValuesRangeQuery(field, value, value) { + @Override + SortedNumericDocValues getValues(LeafReader reader, String field) throws IOException { + FieldInfo info = reader.getFieldInfos().fieldInfo(field); + if (info == null) { + // Queries have some optimizations when one sub scorer returns null rather + // than a scorer that does not match any documents + return null; + } + return DocValues.getSortedNumeric(reader, field); + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 204e7bc4c16ab..524d2b0e0dd38 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -37,6 +37,7 @@ import org.apache.lucene.document.DoublePoint; import org.apache.lucene.document.Field; +import org.apache.lucene.document.FloatField; import org.apache.lucene.document.FloatPoint; import org.apache.lucene.document.IntPoint; import org.apache.lucene.document.LongPoint; @@ -61,6 +62,7 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.core.xcontent.XContentParser.Token; import org.opensearch.index.document.SortedUnsignedLongDocValuesRangeQuery; +import org.opensearch.index.document.SortedUnsignedLongDocValuesSetQuery; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData.NumericType; import org.opensearch.index.fielddata.plain.SortedNumericIndexFieldData; @@ -201,18 +203,39 @@ public Float parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { float v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, HalfFloatPoint.halfFloatToSortableShort(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, HalfFloatPoint.halfFloatToSortableShort(v)); + } return HalfFloatPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { float[] v = new float[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = HalfFloatPoint.halfFloatToSortableShort(v[i]); + } + } + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newSetQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowSetQuery(field, points); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + 
return SortedNumericDocValuesField.newSlowSetQuery(field, points); } return HalfFloatPoint.newSetQuery(field, v); + } @Override @@ -223,6 +246,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { float l = Float.NEGATIVE_INFINITY; @@ -241,16 +265,23 @@ public Query rangeQuery( } u = HalfFloatPoint.nextDown(u); } - Query query = HalfFloatPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = HalfFloatPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, HalfFloatPoint.halfFloatToSortableShort(l), HalfFloatPoint.halfFloatToSortableShort(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + HalfFloatPoint.halfFloatToSortableShort(l), + HalfFloatPoint.halfFloatToSortableShort(u) + ); } - return query; + return HalfFloatPoint.newRangeQuery(field, l, u); } @Override @@ -309,18 +340,39 @@ public Float parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { float v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = FloatPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.floatToSortableInt(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.floatToSortableInt(v)); + } return FloatPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { float[] v = new float[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = NumericUtils.floatToSortableInt(v[i]); + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + FloatPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); } - return FloatPoint.newSetQuery(field, v); + return FloatField.newSetQuery(field, v); } @Override @@ -331,6 +383,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { float l = Float.NEGATIVE_INFINITY; @@ -347,16 +400,23 @@ public Query rangeQuery( u = FloatPoint.nextDown(u); } } - Query query = FloatPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = FloatPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.floatToSortableInt(l), NumericUtils.floatToSortableInt(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + NumericUtils.floatToSortableInt(l), + NumericUtils.floatToSortableInt(u) + ); + } + return FloatPoint.newRangeQuery(field, l, u); } 
@Override @@ -406,16 +466,37 @@ public Double parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { double v = parse(value, false); + if (isSearchable && hasDocValues) { + Query query = DoublePoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.doubleToSortableLong(v)); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, NumericUtils.doubleToSortableLong(v)); + } return DoublePoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { double[] v = new double[values.size()]; + long points[] = new long[v.length]; for (int i = 0; i < values.size(); ++i) { v[i] = parse(values.get(i), false); + if (hasDocValues) { + points[i] = NumericUtils.doubleToSortableLong(v[i]); + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + DoublePoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); } return DoublePoint.newSetQuery(field, v); } @@ -428,19 +509,27 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return doubleRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = DoublePoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = DoublePoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery( field, NumericUtils.doubleToSortableLong(l), NumericUtils.doubleToSortableLong(u) ); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowRangeQuery( + field, + NumericUtils.doubleToSortableLong(l), + NumericUtils.doubleToSortableLong(u) + ); + } + return DoublePoint.newRangeQuery(field, l, u); }); } @@ -504,13 +593,13 @@ public Short parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { - return INTEGER.termQuery(field, value); + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termQuery(field, value, hasDocValues, isSearchable); } @Override - public Query termsQuery(String field, List values) { - return INTEGER.termsQuery(field, values); + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termsQuery(field, values, hasDocValues, isSearchable); } @Override @@ -521,9 +610,10 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { - return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, context); + return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, isSearchable, context); } @Override @@ -571,13 +661,13 @@ public Short parse(XContentParser parser, boolean coerce) 
throws IOException { } @Override - public Query termQuery(String field, Object value) { - return INTEGER.termQuery(field, value); + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termQuery(field, value, hasDocValues, isSearchable); } @Override - public Query termsQuery(String field, List values) { - return INTEGER.termsQuery(field, values); + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { + return INTEGER.termsQuery(field, values, hasDocValues, isSearchable); } @Override @@ -588,9 +678,10 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { - return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, context); + return INTEGER.rangeQuery(field, lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues, isSearchable, context); } @Override @@ -638,16 +729,24 @@ public Integer parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } int v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = IntPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, v); + } return IntPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { int[] v = new int[values.size()]; int upTo = 0; @@ -664,6 +763,21 @@ public Query termsQuery(String field, List values) { if (upTo != v.length) { v = Arrays.copyOf(v, upTo); } + long points[] = new long[v.length]; + if (hasDocValues) { + for (int i = 0; i < v.length; i++) { + points[i] = v[i]; + } + } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + IntPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, points) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, points); + } return IntPoint.newSetQuery(field, v); } @@ -675,6 +789,7 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { int l = Integer.MIN_VALUE; @@ -704,15 +819,23 @@ public Query rangeQuery( --u; } } - Query query = IntPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = IntPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); query = new IndexOrDocValuesQuery(query, dvQuery); if (context.indexSortedOnField(field)) { query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); } + return query; } - return query; + if (hasDocValues) { + Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + return query; + } + return IntPoint.newRangeQuery(field, l, u); } @Override @@ -752,17 
+875,28 @@ public Long parse(XContentParser parser, boolean coerce) throws IOException { } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } long v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = LongPoint.newExactQuery(field, v); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowExactQuery(field, v); + + } return LongPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable) { long[] v = new long[values.size()]; + int upTo = 0; for (int i = 0; i < values.size(); i++) { @@ -778,6 +912,16 @@ public Query termsQuery(String field, List values) { if (upTo != v.length) { v = Arrays.copyOf(v, upTo); } + if (isSearchable && hasDocValues) { + return new IndexOrDocValuesQuery( + LongPoint.newSetQuery(field, v), + SortedNumericDocValuesField.newSlowSetQuery(field, v) + ); + } + if (hasDocValues) { + return SortedNumericDocValuesField.newSlowSetQuery(field, v); + + } return LongPoint.newSetQuery(field, v); } @@ -789,18 +933,28 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return longRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = LongPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = LongPoint.newRangeQuery(field, l, u); Query dvQuery = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); query = new IndexOrDocValuesQuery(query, dvQuery); if (context.indexSortedOnField(field)) { query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); } + return query; } - return query; + if (hasDocValues) { + Query query = SortedNumericDocValuesField.newSlowRangeQuery(field, l, u); + if (context.indexSortedOnField(field)) { + query = new IndexSortSortedNumericDocValuesRangeQuery(field, l, u, query); + } + return query; + } + return LongPoint.newRangeQuery(field, l, u); + }); } @@ -841,16 +995,24 @@ public BigInteger parse(XContentParser parser, boolean coerce) throws IOExceptio } @Override - public Query termQuery(String field, Object value) { + public Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable) { if (hasDecimalPart(value)) { return Queries.newMatchNoDocsQuery("Value [" + value + "] has a decimal part"); } BigInteger v = parse(value, true); + if (isSearchable && hasDocValues) { + Query query = BigIntegerPoint.newExactQuery(field, v); + Query dvQuery = SortedUnsignedLongDocValuesSetQuery.newSlowExactQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocValues) { + return SortedUnsignedLongDocValuesSetQuery.newSlowExactQuery(field, v); + } return BigIntegerPoint.newExactQuery(field, v); } @Override - public Query termsQuery(String field, List values) { + public Query termsQuery(String field, List values, boolean hasDocvalues, boolean isSearchable) { BigInteger[] v = new BigInteger[values.size()]; int upTo = 0; @@ -868,6 +1030,14 @@ public Query termsQuery(String field, List values) { v = 
Arrays.copyOf(v, upTo); } + if (isSearchable && hasDocvalues) { + Query query = BigIntegerPoint.newSetQuery(field, v); + Query dvQuery = SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery(field, v); + return new IndexOrDocValuesQuery(query, dvQuery); + } + if (hasDocvalues) { + return SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery(field, v); + } return BigIntegerPoint.newSetQuery(field, v); } @@ -879,15 +1049,19 @@ public Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ) { return unsignedLongRangeQuery(lowerTerm, upperTerm, includeLower, includeUpper, (l, u) -> { - Query query = BigIntegerPoint.newRangeQuery(field, l, u); - if (hasDocValues) { + if (isSearchable && hasDocValues) { + Query query = BigIntegerPoint.newRangeQuery(field, l, u); Query dvQuery = SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); - query = new IndexOrDocValuesQuery(query, dvQuery); + return new IndexOrDocValuesQuery(query, dvQuery); } - return query; + if (hasDocValues) { + return SortedUnsignedLongDocValuesRangeQuery.newSlowRangeQuery(field, l, u); + } + return BigIntegerPoint.newRangeQuery(field, l, u); }); } @@ -941,9 +1115,9 @@ public final TypeParser parser() { return parser; } - public abstract Query termQuery(String field, Object value); + public abstract Query termQuery(String field, Object value, boolean hasDocValues, boolean isSearchable); - public abstract Query termsQuery(String field, List values); + public abstract Query termsQuery(String field, List values, boolean hasDocValues, boolean isSearchable); public abstract Query rangeQuery( String field, @@ -952,6 +1126,7 @@ public abstract Query rangeQuery( boolean includeLower, boolean includeUpper, boolean hasDocValues, + boolean isSearchable, QueryShardContext context ); @@ -1226,8 +1401,8 @@ public NumericType numericType() { @Override public Query termQuery(Object value, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.termQuery(name(), value); + failIfNotIndexedAndNoDocValues(); + Query query = type.termQuery(name(), value, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -1236,8 +1411,8 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query termsQuery(List values, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.termsQuery(name(), values); + failIfNotIndexedAndNoDocValues(); + Query query = type.termsQuery(name(), values, hasDocValues(), isSearchable()); if (boost() != 1f) { query = new BoostQuery(query, boost()); } @@ -1246,8 +1421,17 @@ public Query termsQuery(List values, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, QueryShardContext context) { - failIfNotIndexed(); - Query query = type.rangeQuery(name(), lowerTerm, upperTerm, includeLower, includeUpper, hasDocValues(), context); + failIfNotIndexedAndNoDocValues(); + Query query = type.rangeQuery( + name(), + lowerTerm, + upperTerm, + includeLower, + includeUpper, + hasDocValues(), + isSearchable(), + context + ); if (boost() != 1f) { query = new BoostQuery(query, boost()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java index 3c30bb81a9a32..af852b12e7a30 100644 --- a/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java 
+++ b/server/src/test/java/org/opensearch/index/mapper/NumberFieldTypeTests.java @@ -66,6 +66,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.index.IndexSettings; import org.opensearch.index.document.SortedUnsignedLongDocValuesRangeQuery; +import org.opensearch.index.document.SortedUnsignedLongDocValuesSetQuery; import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.mapper.MappedFieldType.Relation; import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; @@ -118,15 +119,27 @@ public void testIsFieldWithinQuery() throws IOException { public void testIntegerTermsQueryWithDecimalPart() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberType.INTEGER); - assertEquals(IntPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1, 2.1), null)); - assertEquals(IntPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1.0, 2.1), null)); + assertEquals( + new IndexOrDocValuesQuery(IntPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1, 2.1), null) + ); + assertEquals( + new IndexOrDocValuesQuery(IntPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1.0, 2.1), null) + ); assertTrue(ft.termsQuery(Arrays.asList(1.1, 2.1), null) instanceof MatchNoDocsQuery); } public void testLongTermsQueryWithDecimalPart() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberType.LONG); - assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1, 2.1), null)); - assertEquals(LongPoint.newSetQuery("field", 1), ft.termsQuery(Arrays.asList(1.0, 2.1), null)); + assertEquals( + new IndexOrDocValuesQuery(LongPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1, 2.1), null) + ); + assertEquals( + new IndexOrDocValuesQuery(LongPoint.newSetQuery("field", 1), SortedNumericDocValuesField.newSlowSetQuery("field", 1)), + ft.termsQuery(Arrays.asList(1.0, 2.1), null) + ); assertTrue(ft.termsQuery(Arrays.asList(1.1, 2.1), null) instanceof MatchNoDocsQuery); } @@ -151,16 +164,18 @@ public void testLongTermQueryWithDecimalPart() { } private static MappedFieldType unsearchable() { - return new NumberFieldType("field", NumberType.LONG, false, false, true, true, null, Collections.emptyMap()); + return new NumberFieldType("field", NumberType.LONG, false, false, false, true, null, Collections.emptyMap()); } public void testTermQuery() { MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.LONG); - assertEquals(LongPoint.newExactQuery("field", 42), ft.termQuery("42", null)); + Query dvQuery = SortedNumericDocValuesField.newSlowExactQuery("field", 42); + Query query = new IndexOrDocValuesQuery(LongPoint.newExactQuery("field", 42), dvQuery); + assertEquals(query, ft.termQuery("42", null)); MappedFieldType unsearchable = unsearchable(); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("42", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testRangeQueryWithNegativeBounds() { @@ -380,7 +395,7 @@ public void testLongRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", 
true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testUnsignedLongRangeQuery() { @@ -396,7 +411,23 @@ public void testUnsignedLongRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); + } + + public void testUnsignedLongTermsQuery() { + MappedFieldType ft = new NumberFieldMapper.NumberFieldType("field", NumberFieldMapper.NumberType.UNSIGNED_LONG); + Query expected = new IndexOrDocValuesQuery( + BigIntegerPoint.newSetQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)), + SortedUnsignedLongDocValuesSetQuery.newSlowSetQuery("field", BigInteger.valueOf(1), BigInteger.valueOf(3)) + ); + assertEquals(expected, ft.termsQuery(List.of("1", "3"), MOCK_QSC)); + + MappedFieldType unsearchable = unsearchable(); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termsQuery(List.of("1", "3"), MOCK_QSC) + ); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testDoubleRangeQuery() { @@ -416,7 +447,7 @@ public void testDoubleRangeQuery() { IllegalArgumentException.class, () -> unsearchable.rangeQuery("1", "3", true, true, null, null, null, MOCK_QSC) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals("Cannot search on field [field] since it is both not indexed, and does not have doc_values enabled.", e.getMessage()); } public void testConversions() { @@ -518,8 +549,8 @@ public void testHalfFloatRange() throws IOException { float u = (randomFloat() * 2 - 1) * 65504; boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query floatQ = NumberType.FLOAT.rangeQuery("float", l, u, includeLower, includeUpper, false, MOCK_QSC); - Query halfFloatQ = NumberType.HALF_FLOAT.rangeQuery("half_float", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query floatQ = NumberType.FLOAT.rangeQuery("float", l, u, includeLower, includeUpper, false, true, MOCK_QSC); + Query halfFloatQ = NumberType.HALF_FLOAT.rangeQuery("half_float", l, u, includeLower, includeUpper, false, true, MOCK_QSC); assertEquals(searcher.count(floatQ), searcher.count(halfFloatQ)); } IOUtils.close(reader, dir); @@ -549,8 +580,17 @@ public void testUnsignedLongRange() throws IOException { BigInteger u = randomUnsignedLong(); boolean includeLower = randomBoolean(); boolean includeUpper = randomBoolean(); - Query unsignedLongQ = NumberType.UNSIGNED_LONG.rangeQuery("unsigned_long", l, u, includeLower, includeUpper, false, MOCK_QSC); - Query doubleQ = NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC); + Query unsignedLongQ = NumberType.UNSIGNED_LONG.rangeQuery( + "unsigned_long", + l, + u, + includeLower, + includeUpper, + false, + true, + MOCK_QSC + ); + Query doubleQ = NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, true, MOCK_QSC); assertEquals(searcher.count(doubleQ), searcher.count(unsignedLongQ)); } IOUtils.close(reader, 
dir); @@ -558,21 +598,23 @@ public void testUnsignedLongRange() throws IOException { public void testNegativeZero() { assertEquals( - NumberType.DOUBLE.rangeQuery("field", null, -0d, true, true, false, MOCK_QSC), - NumberType.DOUBLE.rangeQuery("field", null, +0d, true, false, false, MOCK_QSC) + NumberType.DOUBLE.rangeQuery("field", null, -0d, true, true, false, true, MOCK_QSC), + NumberType.DOUBLE.rangeQuery("field", null, +0d, true, false, false, true, MOCK_QSC) ); assertEquals( - NumberType.FLOAT.rangeQuery("field", null, -0f, true, true, false, MOCK_QSC), - NumberType.FLOAT.rangeQuery("field", null, +0f, true, false, false, MOCK_QSC) + NumberType.FLOAT.rangeQuery("field", null, -0f, true, true, false, true, MOCK_QSC), + NumberType.FLOAT.rangeQuery("field", null, +0f, true, false, false, true, MOCK_QSC) ); assertEquals( - NumberType.HALF_FLOAT.rangeQuery("field", null, -0f, true, true, false, MOCK_QSC), - NumberType.HALF_FLOAT.rangeQuery("field", null, +0f, true, false, false, MOCK_QSC) + NumberType.HALF_FLOAT.rangeQuery("field", null, -0f, true, true, false, true, MOCK_QSC), + NumberType.HALF_FLOAT.rangeQuery("field", null, +0f, true, false, false, true, MOCK_QSC) ); - assertFalse(NumberType.DOUBLE.termQuery("field", -0d).equals(NumberType.DOUBLE.termQuery("field", +0d))); - assertFalse(NumberType.FLOAT.termQuery("field", -0f).equals(NumberType.FLOAT.termQuery("field", +0f))); - assertFalse(NumberType.HALF_FLOAT.termQuery("field", -0f).equals(NumberType.HALF_FLOAT.termQuery("field", +0f))); + assertFalse(NumberType.DOUBLE.termQuery("field", -0d, true, true).equals(NumberType.DOUBLE.termQuery("field", +0d, true, true))); + assertFalse(NumberType.FLOAT.termQuery("field", -0f, true, true).equals(NumberType.FLOAT.termQuery("field", +0f, true, true))); + assertFalse( + NumberType.HALF_FLOAT.termQuery("field", -0f, true, true).equals(NumberType.HALF_FLOAT.termQuery("field", +0f, true, true)) + ); } // Make sure we construct the IndexOrDocValuesQuery objects with queries that match @@ -628,6 +670,7 @@ public void doTestDocValueRangeQueries(NumberType type, Supplier valueSu randomBoolean(), randomBoolean(), true, + true, MOCK_QSC ); assertThat(query, instanceOf(IndexOrDocValuesQuery.class)); @@ -708,6 +751,7 @@ public void doTestIndexSortRangeQueries(NumberType type, Supplier valueS randomBoolean(), randomBoolean(), true, + true, context ); assertThat(query, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class)); diff --git a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java index 3ac9bce840a20..c5bdf9b586df1 100644 --- a/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermQueryBuilderTests.java @@ -36,6 +36,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; @@ -112,6 +113,7 @@ protected void doAssertLuceneQuery(TermQueryBuilder queryBuilder, Query query, Q either(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) .or(instanceOf(AutomatonQuery.class)) + .or(instanceOf(IndexOrDocValuesQuery.class)) ); MappedFieldType mapper = context.fieldMapper(queryBuilder.fieldName()); if (query instanceof TermQuery) { diff --git 
a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java index b587bec2d5343..32bf290627b63 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PointInSetQuery; import org.apache.lucene.search.Query; @@ -135,6 +136,7 @@ protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query, either(instanceOf(TermInSetQuery.class)).or(instanceOf(PointInSetQuery.class)) .or(instanceOf(ConstantScoreQuery.class)) .or(instanceOf(MatchNoDocsQuery.class)) + .or(instanceOf(IndexOrDocValuesQuery.class)) ); if (query instanceof ConstantScoreQuery) { assertThat(((ConstantScoreQuery) query).getQuery(), instanceOf(BooleanQuery.class)); From 808ed56bcbc6ed547f64895cd02283df07fb5c48 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 4 Jan 2024 15:14:29 -0500 Subject: [PATCH 33/81] Add the means to extract the contextual underlying channel from HttpChannel without excessive typecasting (#11751) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 +- .../http/netty4/Netty4HttpChannel.java | 5 ++ .../http/netty4/Netty4HttpChannelTests.java | 52 +++++++++++++++++++ 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index ac15176af56f5..51276133af7d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -162,7 +162,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) - Force merge with `only_expunge_deletes` honors max segment size ([#10036](https://github.com/opensearch-project/OpenSearch/pull/10036)) -- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)) +- Add the means to extract the contextual properties from HttpChannel, TcpCChannel and TrasportChannel without excessive typecasting ([#10562](https://github.com/opensearch-project/OpenSearch/pull/10562)), ([#11751](https://github.com/opensearch-project/OpenSearch/pull/11751)) - Introduce new dynamic cluster setting to control slice computation for concurrent segment search ([#9107](https://github.com/opensearch-project/OpenSearch/pull/9107)) - Search pipelines now support asynchronous request and response processors to avoid blocking on a transport thread ([#10598](https://github.com/opensearch-project/OpenSearch/pull/10598)) - [Remote Store] Add Remote Store backpressure rejection stats to `_nodes/stats` ([#10524](https://github.com/opensearch-project/OpenSearch/pull/10524)) diff --git a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java index 6475a0b744c60..75d30aa9797c0 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java +++ 
b/modules/transport-netty4/src/main/java/org/opensearch/http/netty4/Netty4HttpChannel.java @@ -46,6 +46,7 @@ import io.netty.channel.ChannelPipeline; public class Netty4HttpChannel implements HttpChannel { + private static final String CHANNEL_PROPERTY = "channel"; private final Channel channel; private final CompletableContext closeContext = new CompletableContext<>(); @@ -102,6 +103,10 @@ public Channel getNettyChannel() { @SuppressWarnings("unchecked") @Override public Optional get(String name, Class clazz) { + if (CHANNEL_PROPERTY.equalsIgnoreCase(name) && clazz.isAssignableFrom(Channel.class)) { + return (Optional) Optional.of(getNettyChannel()); + } + Object handler = getNettyChannel().pipeline().get(name); if (handler == null && inboundPipeline() != null) { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java new file mode 100644 index 0000000000000..c49166a51c24a --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpChannelTests.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http.netty4; + +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.transport.Netty4NioSocketChannel; +import org.junit.Before; + +import java.util.Optional; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelOutboundInvoker; +import io.netty.channel.ServerChannel; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.sameInstance; + +public class Netty4HttpChannelTests extends OpenSearchTestCase { + private Netty4HttpChannel netty4HttpChannel; + private Channel channel; + + @Before + @Override + public void setUp() throws Exception { + super.setUp(); + channel = new Netty4NioSocketChannel(); + netty4HttpChannel = new Netty4HttpChannel(channel); + } + + public void testChannelAttributeMatchesChannel() { + final Optional channelOpt = netty4HttpChannel.get("channel", Channel.class); + assertThat(channelOpt.isPresent(), is(true)); + assertThat(channelOpt.get(), sameInstance(channel)); + } + + public void testChannelAttributeMatchesChannelOutboundInvoker() { + final Optional channelOpt = netty4HttpChannel.get("channel", ChannelOutboundInvoker.class); + assertThat(channelOpt.isPresent(), is(true)); + assertThat(channelOpt.get(), sameInstance(channel)); + } + + public void testChannelAttributeIsEmpty() { + final Optional channelOpt = netty4HttpChannel.get("channel", ServerChannel.class); + assertThat(channelOpt.isEmpty(), is(true)); + } +} From 22b628bacaab56c145538898ca190ffce9778c6e Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Thu, 4 Jan 2024 21:13:28 +0000 Subject: [PATCH 34/81] Fix parsing of flat object fields with dots in keys (#11425) We have a bug where a flat object field with inner fields that contain dots will "push" the dotted name onto the dot-path from the root, but then would just "pop" off the last part of the dotted name. This change adds more robust support for flat object keys and subkeys that contain dots (i.e. it pops off the entirety of the latest key, regardless of how many dots it contains). 
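To illustrate the splitting rule described above, here is a minimal, self-contained sketch. The class name DottedKeySplitter and the splitKeys helper are hypothetical and exist only for this example; the actual change applies the same loop inside JsonToStringXContentParser#parseToken, which records each part into its keyList.

    import java.util.ArrayList;
    import java.util.List;

    public class DottedKeySplitter {
        // Split a flat_object key such as "second.inner" or "totally.absolutely.inner"
        // into its individual parts, however many dots it contains.
        static List<String> splitKeys(String fieldName) {
            List<String> keys = new ArrayList<>();
            String suffix = fieldName;
            int dotIndex = suffix.indexOf('.');
            while (dotIndex >= 0) {
                String prefix = suffix.substring(0, dotIndex);
                if (!prefix.isEmpty()) {
                    keys.add(prefix); // every part before a dot is its own key
                }
                suffix = suffix.substring(dotIndex + 1);
                dotIndex = suffix.indexOf('.');
            }
            if (!suffix.isEmpty()) {
                keys.add(suffix); // the part after the last dot
            }
            return keys;
        }

        public static void main(String[] args) {
            System.out.println(splitKeys("second.inner"));             // [second, inner]
            System.out.println(splitKeys("totally.absolutely.inner")); // [totally, absolutely, inner]
        }
    }

Under this rule, popping a key off the dot-path removes the entire dotted name that was pushed (the new path.setLength(...) call below), not just the part after its last dot.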
Fixes https://github.com/opensearch-project/OpenSearch/issues/11402 Signed-off-by: Michael Froh --- CHANGELOG.md | 3 +- .../xcontent/JsonToStringXContentParser.java | 31 +++-- .../index/mapper/FlatObjectFieldMapper.java | 2 +- .../JsonToStringXContentParserTests.java | 113 ++++++++++++++++++ 4 files changed, 137 insertions(+), 12 deletions(-) create mode 100644 server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 51276133af7d2..0958dd41d5a84 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -191,7 +191,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Fixed - Fix failure in dissect ingest processor parsing empty brackets ([#9225](https://github.com/opensearch-project/OpenSearch/pull/9255)) -- Fix class_cast_exception when passing int to _version and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) +- Fix `class_cast_exception` when passing int to `_version` and other metadata fields in ingest simulate API ([#10101](https://github.com/opensearch-project/OpenSearch/pull/10101)) - Fix Segment Replication ShardLockObtainFailedException bug during index corruption ([10370](https://github.com/opensearch-project/OpenSearch/pull/10370)) - Fix some test methods in SimulatePipelineRequestParsingTests never run and fix test failure ([#10496](https://github.com/opensearch-project/OpenSearch/pull/10496)) - Fix passing wrong parameter when calling newConfigurationException() in DotExpanderProcessor ([#10737](https://github.com/opensearch-project/OpenSearch/pull/10737)) @@ -203,6 +203,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix the issue with DefaultSpanScope restoring wrong span in the TracerContextStorage upon detach ([#11316](https://github.com/opensearch-project/OpenSearch/issues/11316)) - Remove shadowJar from `lang-painless` module publication ([#11369](https://github.com/opensearch-project/OpenSearch/issues/11369)) - Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) +- Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) - Fix bug where replication lag grows post primary relocation ([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) - Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 9e81d8a7af078..9b2bd06a88e2e 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -18,7 +18,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.mapper.ParseContext; import java.io.IOException; import java.math.BigInteger; @@ -40,7 +39,6 @@ public class JsonToStringXContentParser extends AbstractXContentParser { private ArrayList keyList = new 
ArrayList<>(); private XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent); - private ParseContext parseContext; private NamedXContentRegistry xContentRegistry; @@ -54,14 +52,13 @@ public class JsonToStringXContentParser extends AbstractXContentParser { public JsonToStringXContentParser( NamedXContentRegistry xContentRegistry, DeprecationHandler deprecationHandler, - ParseContext parseContext, + XContentParser parser, String fieldTypeName ) throws IOException { super(xContentRegistry, deprecationHandler); - this.parseContext = parseContext; this.deprecationHandler = deprecationHandler; this.xContentRegistry = xContentRegistry; - this.parser = parseContext.parser(); + this.parser = parser; this.fieldTypeName = fieldTypeName; } @@ -86,8 +83,22 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx StringBuilder parsedFields = new StringBuilder(); if (this.parser.currentToken() == Token.FIELD_NAME) { - path.append(DOT_SYMBOL + currentFieldName); - this.keyList.add(currentFieldName); + path.append(DOT_SYMBOL).append(currentFieldName); + int dotIndex = currentFieldName.indexOf(DOT_SYMBOL); + String fieldNameSuffix = currentFieldName; + // The field name may be of the form foo.bar.baz + // If that's the case, each "part" is a key. + while (dotIndex >= 0) { + String fieldNamePrefix = fieldNameSuffix.substring(0, dotIndex); + if (!fieldNamePrefix.isEmpty()) { + this.keyList.add(fieldNamePrefix); + } + fieldNameSuffix = fieldNameSuffix.substring(dotIndex + 1); + dotIndex = fieldNameSuffix.indexOf(DOT_SYMBOL); + } + if (!fieldNameSuffix.isEmpty()) { + this.keyList.add(fieldNameSuffix); + } } else if (this.parser.currentToken() == Token.START_ARRAY) { parseToken(path, currentFieldName); break; @@ -97,18 +108,18 @@ private void parseToken(StringBuilder path, String currentFieldName) throws IOEx parseToken(path, currentFieldName); int dotIndex = path.lastIndexOf(DOT_SYMBOL); if (dotIndex != -1) { - path.delete(dotIndex, path.length()); + path.setLength(path.length() - currentFieldName.length() - 1); } } else { if (!path.toString().contains(currentFieldName)) { - path.append(DOT_SYMBOL + currentFieldName); + path.append(DOT_SYMBOL).append(currentFieldName); } parseValue(parsedFields); this.valueList.add(parsedFields.toString()); this.valueAndPathList.add(path + EQUAL_SYMBOL + parsedFields); int dotIndex = path.lastIndexOf(DOT_SYMBOL); if (dotIndex != -1) { - path.delete(dotIndex, path.length()); + path.setLength(path.length() - currentFieldName.length() - 1); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 00b623dddac23..9a3f2595a7c9e 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -572,7 +572,7 @@ protected void parseCreateField(ParseContext context) throws IOException { JsonToStringXContentParser JsonToStringParser = new JsonToStringXContentParser( NamedXContentRegistry.EMPTY, DeprecationHandler.IGNORE_DEPRECATIONS, - context, + context.parser(), fieldType().name() ); /* diff --git a/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java b/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java new file mode 100644 index 0000000000000..0feb7bcd1ceec --- /dev/null +++ 
b/server/src/test/java/org/opensearch/common/xcontent/JsonToStringXContentParserTests.java @@ -0,0 +1,113 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.xcontent; + +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +public class JsonToStringXContentParserTests extends OpenSearchTestCase { + + private String flattenJsonString(String fieldName, String in) throws IOException { + String transformed; + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + in + ) + ) { + JsonToStringXContentParser jsonToStringXContentParser = new JsonToStringXContentParser( + xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + parser, + fieldName + ); + // Skip the START_OBJECT token: + jsonToStringXContentParser.nextToken(); + + XContentParser transformedParser = jsonToStringXContentParser.parseObject(); + try (XContentBuilder jsonBuilder = XContentFactory.jsonBuilder()) { + jsonBuilder.copyCurrentStructure(transformedParser); + return jsonBuilder.toString(); + } + } + } + + public void testNestedObjects() throws IOException { + String jsonExample = "{" + "\"first\" : \"1\"," + "\"second\" : {" + " \"inner\": \"2.0\"" + "}," + "\"third\": \"three\"" + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testChildHasDots() throws IOException { + // This should be exactly the same as testNestedObjects. We're just using the "flat" notation for the inner + // object. 
+ String jsonExample = "{" + "\"first\" : \"1\"," + "\"second.inner\" : \"2.0\"," + "\"third\": \"three\"" + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testNestChildObjectWithDots() throws IOException { + String jsonExample = "{" + + "\"first\" : \"1\"," + + "\"second.inner\" : {" + + " \"really_inner\" : \"2.0\"" + + "}," + + "\"third\": \"three\"" + + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"really_inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner.really_inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + + public void testNestChildObjectWithDotsAndFieldWithDots() throws IOException { + String jsonExample = "{" + + "\"first\" : \"1\"," + + "\"second.inner\" : {" + + " \"totally.absolutely.inner\" : \"2.0\"" + + "}," + + "\"third\": \"three\"" + + "}"; + + assertEquals( + "{" + + "\"flat\":[\"first\",\"second\",\"inner\",\"totally\",\"absolutely\",\"inner\",\"third\"]," + + "\"flat._value\":[\"1\",\"2.0\",\"three\"]," + + "\"flat._valueAndPath\":[\"flat.first=1\",\"flat.second.inner.totally.absolutely.inner=2.0\",\"flat.third=three\"]" + + "}", + flattenJsonString("flat", jsonExample) + ); + } + +} From 2860805e894c49a04ebf72121d96a24d353b6194 Mon Sep 17 00:00:00 2001 From: Rishabh Maurya Date: Thu, 4 Jan 2024 13:17:25 -0800 Subject: [PATCH 35/81] Update the skip version for match_only_text field Integ tests to 2.11.99 (#11752) * update the skip version for integ tests to 2.11.99 Signed-off-by: Rishabh Maurya * fix the version in skip section reason Signed-off-by: Rishabh Maurya --------- Signed-off-by: Rishabh Maurya --- .../11_match_field_match_only_text.yml | 4 ++-- .../20_ngram_search_field_match_only_text.yml | 8 ++++---- ..._ngram_highligthing_field_match_only_text.yml | 4 ++-- .../40_query_string_field_match_only_text.yml | 4 ++-- ...th_default_analyzer_field_match_only_text.yml | 4 ++-- ...eries_with_synonyms_field_match_only_text.yml | 8 ++++---- .../60_synonym_graph_field_match_only_text.yml | 4 ++-- .../70_intervals_field_match_only_text.yml | 8 ++++---- .../20_phrase_field_match_only_text.yml | 16 ++++++++-------- .../20_highlighting_field_match_only_text.yml | 4 ++-- .../20_query_string_field_match_only_text.yml | 4 ++-- .../30_sig_terms_field_match_only_text.yml | 4 ++-- .../90_sig_text_field_match_only_text.yml | 8 ++++---- .../20_highlighting_field_match_only_text.yml | 4 ++-- .../search/160_exists_query_match_only_text.yml | 4 ++-- .../200_phrase_search_field_match_only_text.yml | 4 ++-- ...0_match_bool_prefix_field_match_only_text.yml | 4 ++-- ...20_disallow_queries_field_match_only_text.yml | 4 ++-- .../10_basic_field_match_only_field.yml | 4 ++-- 19 files changed, 52 insertions(+), 52 deletions(-) diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml index 40ff2c2f4cdbe..140d70414a4a7 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml +++ 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml @@ -2,8 +2,8 @@ "match query with stacked stems": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" # Tests the match query stemmed tokens are "stacked" on top of the unstemmed # versions in the same position. - do: diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml index 95b648dee47c8..a5da3043f19b5 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/20_ngram_search_field_match_only_text.yml @@ -1,7 +1,7 @@ "ngram search": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test @@ -45,8 +45,8 @@ --- "testNGramCopyField": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml index 597f55679a2c6..accf5d975d57f 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml @@ -1,7 +1,7 @@ "ngram highlighting": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml index ddebb1d76acbc..717d3a7dd8a3e 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/40_query_string_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "Test query string with snowball": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml index 97f3fb65e94a2..cd2d2e42c6a17 100644 --- 
a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/41_query_string_with_default_analyzer_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "Test default search analyzer is applied": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml index 0c263a47a38e6..0c537dd42d583 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/50_queries_with_synonyms_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "Test common terms query with stacked tokens": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" features: "allowed_warnings" - do: @@ -247,8 +247,8 @@ --- "Test match query with synonyms - see #3881 for extensive description of the issue": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml index 91a8b1509517e..d3f5d0fe4f8b4 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/60_synonym_graph_field_match_only_text.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml index 9792c9d2695ea..8334ca27ff274 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/70_intervals_field_match_only_text.yml @@ -1,8 +1,8 @@ # integration tests for intervals queries using analyzers setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test @@ -26,8 +26,8 @@ setup: --- "Test use_field": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: catch: bad_request search: diff --git 
a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml index aff2b3f11101c..90596ca04205c 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.suggest/20_phrase_field_match_only_text.yml @@ -2,8 +2,8 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test @@ -122,8 +122,8 @@ setup: --- "breaks ties by sorting terms": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" # This runs the suggester without bigrams so we can be sure of the sort order - do: search: @@ -181,8 +181,8 @@ setup: --- "doesn't fail when asked to run on a field without unigrams when force_unigrams=false": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: search: rest_total_hits_as_int: true @@ -213,8 +213,8 @@ setup: --- "reverse suggestions": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: search: rest_total_hits_as_int: true diff --git a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml index 3cb8e09c70aed..1d6a938675e39 100644 --- a/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml +++ b/modules/mapper-extras/src/yamlRestTest/resources/rest-api-spec/test/search-as-you-type/20_highlighting_field_match_only_text.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml index 085c5633ac72b..044ae5dd6a94d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.validate_query/20_query_string_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "validate_query with query_string parameters": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml index 7a96536a2e261..d1cc6c8295bd9 100644 --- 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "Default index": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: goodbad diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml index bc41f157dfdc4..e21c4fb946d85 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "Default index": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: goodbad @@ -78,8 +78,8 @@ --- "Dedup noise": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: goodbad diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml index 7100d620bf19e..9e60d69bfedd7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.inner_hits/20_highlighting_field_match_only_text.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: index: test diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml index 03626236604a1..69c639a8f506a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/160_exists_query_match_only_text.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" features: ["headers"] - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml index a41b8d353e3e9..13fd6b3858948 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/200_phrase_search_field_match_only_text.yml @@ -1,8 +1,8 @@ --- "search with indexed phrases": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml index fc4e9f9de0f38..682a7dded1e9b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix_field_match_only_text.yml @@ -1,7 +1,7 @@ setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml index f4faf87eb83cc..00e54e43d6f04 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/320_disallow_queries_field_match_only_text.yml @@ -1,8 +1,8 @@ --- setup: - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml index cc15796e4697f..44adb48c8765e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/10_basic_field_match_only_field.yml @@ -1,8 +1,8 @@ --- "Search shards aliases with and without filters": - skip: - version: " - 2.99.99" - reason: "match_only_text was added in 3.0" + version: " - 2.11.99" + reason: "match_only_text was added in 2.12" - do: indices.create: From 714fa73c4bc4c0cbaa624629c724e57be8405209 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Thu, 4 Jan 2024 15:13:10 -0800 Subject: [PATCH 36/81] Add logging for tests in RemoteStoreStatsIT to catch assertion failure cause (#11734) * Add logging for tests in RemoteStoreStatsIT to catch assertion failure cause Signed-off-by: Poojita Raj * Add test logging annotation for trace logs Signed-off-by: Poojita Raj --------- Signed-off-by: Poojita Raj --- .../remotestore/RemoteStoreStatsIT.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 2d3ab135d0377..4a0af206b9d89 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -29,6 +29,7 @@ import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.test.transport.MockTransportService; import java.io.IOException; @@ -249,6 +250,7 @@ public void testStatsResponseFromLocalNode() { } } + @TestLogging(reason = "Getting trace logs from remote store package", value = 
"org.opensearch.remotestore:TRACE") public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { setup(); // Scenario: @@ -277,6 +279,13 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce .collect(Collectors.toList()) .get(0) .getSegmentStats(); + logger.info( + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + zeroStatePrimaryStats.refreshTimeLagMs, + zeroStatePrimaryStats.bytesLag, + zeroStatePrimaryStats.uploadBytesStarted, + zeroStatePrimaryStats.uploadBytesFailed + ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded && zeroStatePrimaryStats.totalUploadsSucceeded == 1 @@ -339,6 +348,7 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce } } + @TestLogging(reason = "Getting trace logs from remote store package", value = "org.opensearch.remotestore:TRACE") public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { setup(); // Scenario: @@ -371,6 +381,13 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr .collect(Collectors.toList()) .get(0) .getSegmentStats(); + logger.info( + "Zero state primary stats: {}ms refresh time lag, {}b bytes lag, {}b upload bytes started and {}b upload bytes failed.", + zeroStatePrimaryStats.refreshTimeLagMs, + zeroStatePrimaryStats.bytesLag, + zeroStatePrimaryStats.uploadBytesStarted, + zeroStatePrimaryStats.uploadBytesFailed + ); assertTrue( zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded && zeroStatePrimaryStats.totalUploadsSucceeded == 1 From 3a3da4fe353500e6d5e46785a594db33280fb56c Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Thu, 4 Jan 2024 16:25:36 -0800 Subject: [PATCH 37/81] [Segment Replication] [Remote Store] Replace overriding mockInternalEngine() in test classes with NRTReplicationEngine. (#11716) * Replace overriding mockInternalEngine() in test classes with NRTReplicationEngine. Signed-off-by: Rishikesh1159 * remove unused comment. Signed-off-by: Rishikesh1159 * Add comment for explaining the conditional logic. Signed-off-by: Rishikesh1159 * Update comment with exact reason for conditional logic. 
Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 --- .../indices/replication/SegmentReplicationBaseIT.java | 5 ----- .../replication/SegmentReplicationClusterSettingIT.java | 5 ----- .../remotestore/RemoteIndexPrimaryRelocationIT.java | 5 ----- .../remotestore/RemoteStoreBaseIntegTestCase.java | 5 ----- .../SegmentReplicationUsingRemoteStoreDisruptionIT.java | 5 ----- .../snapshots/SegmentReplicationSnapshotIT.java | 5 ----- .../org/opensearch/test/engine/MockEngineFactory.java | 9 ++++++++- 7 files changed, 8 insertions(+), 31 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 1d93eecd6b245..641f714d33414 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -60,11 +60,6 @@ protected Collection> nodePlugins() { return asList(MockTransportService.TestPlugin.class); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index c4e8ccfc0ecec..f2cb7c9c6bfc8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -52,11 +52,6 @@ public Settings indexSettings() { .build(); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Exception { Settings settings = Settings.builder().put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final String ANOTHER_INDEX = "test-index"; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index d8b7718a55377..67316ed0e6e6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -35,11 +35,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 8c15ebd0505d9..d23e634bb3368 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -121,11 +121,6 @@ protected Map indexData(int numberOfIterations, boolean invokeFlus return indexingStats; } - @Override - protected boolean addMockInternalEngine() { - return 
false; - } - @Override protected Settings nodeSettings(int nodeOrdinal) { if (segmentRepoPath == null || translogRepoPath == null) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java index d5cdc22a15478..8372135fc55c4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreDisruptionIT.java @@ -44,11 +44,6 @@ public Settings indexSettings() { return remoteStoreIndexSettings(1); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - public void testCancelReplicationWhileSyncingSegments() throws Exception { Path location = randomRepoPath().toAbsolutePath(); setup(location, 0d, "metadata", Long.MAX_VALUE, 1); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java index 2c12c0abb202b..c649c4ab13e7e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SegmentReplicationSnapshotIT.java @@ -74,11 +74,6 @@ public Settings restoreIndexDocRepSettings() { return Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); } - @Override - protected boolean addMockInternalEngine() { - return false; - } - public void ingestData(int docCount, String indexName) throws Exception { for (int i = 0; i < docCount; i++) { client().prepareIndex(indexName).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); diff --git a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java index 30cc48c588be1..102c641746b01 100644 --- a/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/engine/MockEngineFactory.java @@ -35,6 +35,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.NRTReplicationEngine; public final class MockEngineFactory implements EngineFactory { @@ -46,6 +47,12 @@ public MockEngineFactory(Class wrapper) { @Override public Engine newReadWriteEngine(EngineConfig config) { - return new MockInternalEngine(config, wrapper); + + /** + * Segment replication enabled replicas (i.e. read only replicas) do not use an InternalEngine so a MockInternalEngine + * will not work and an NRTReplicationEngine must be used instead. The primary shards for these indexes will + * still use a MockInternalEngine. + */ + return config.isReadOnlyReplica() ? 
new NRTReplicationEngine(config) : new MockInternalEngine(config, wrapper); } } From ab0f70eef70d9c75eabc4abfc8e7f1073d739ced Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Fri, 5 Jan 2024 09:20:56 -0800 Subject: [PATCH 38/81] Introduce a new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality (#11717) * Introduce a new feature flag to gate the writeable remote index functionality. Signed-off-by: Rishikesh1159 * Add changelog entry. Signed-off-by: Rishikesh1159 * Update changelog entry. Signed-off-by: Rishikesh1159 --------- Signed-off-by: Rishikesh1159 --- CHANGELOG.md | 1 + .../common/settings/FeatureFlagSettings.java | 3 ++- .../org/opensearch/common/util/FeatureFlags.java | 12 ++++++++++++ 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0958dd41d5a84..f7aa083eb2bcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) - Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 387b0c9753574..d3285c379bcc4 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -40,7 +40,8 @@ protected FeatureFlagSettings( FeatureFlags.IDENTITY_SETTING, FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, FeatureFlags.TELEMETRY_SETTING, - FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING + FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, + FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 4e9b417e3433b..c54772caa574b 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -60,6 +60,12 @@ public class FeatureFlags { */ public static final String DATETIME_FORMATTER_CACHING = "opensearch.experimental.optimization.datetime_formatter_caching.enabled"; + /** + * Gates the functionality of writeable remote index + * Once the feature is ready for release, this feature flag can be removed. + */ + public static final String WRITEABLE_REMOTE_INDEX = "opensearch.experimental.feature.writeable_remote_index.enabled"; + /** * Should store the settings from opensearch.yml. 
*/ @@ -122,4 +128,10 @@ public static boolean isEnabled(Setting featureFlag) { true, Property.NodeScope ); + + public static final Setting WRITEABLE_REMOTE_INDEX_SETTING = Setting.boolSetting( + WRITEABLE_REMOTE_INDEX, + false, + Property.NodeScope + ); } From cbfe160f32c845fc52a04405797ef90dcdc3e88a Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 5 Jan 2024 15:24:46 -0500 Subject: [PATCH 39/81] Bump netty from 4.1.100.Final to 4.1.104.Final (#11775) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 +- buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.104.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.101.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.101.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.104.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.104.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.104.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.101.Final.jar.sha1 | 1 - 
.../netty-transport-classes-epoll-4.1.104.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.101.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.104.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.104.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.101.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.104.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.101.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.104.Final.jar.sha1 | 1 + 90 files changed, 46 insertions(+), 46 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 delete mode 100644 
modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 delete mode 100644 
plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 create mode 100644 
plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index f7aa083eb2bcb..140c4b07ab6e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -139,7 +139,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `stefanzweifel/git-auto-commit-action` from 4 to 5 ([#11171](https://github.com/opensearch-project/OpenSearch/pull/11171)) - Bump `actions/github-script` from 6 to 7 ([#11271](https://github.com/opensearch-project/OpenSearch/pull/11271)) - Bump `jackson` and `jackson_databind` from 2.15.2 to 2.16.0 ([#11273](https://github.com/opensearch-project/OpenSearch/pull/11273)) -- Bump `netty` from 4.1.100.Final to 4.1.101.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294)) +- Bump `netty` from 4.1.100.Final to 4.1.104.Final ([#11294](https://github.com/opensearch-project/OpenSearch/pull/11294), [#11775](https://github.com/opensearch-project/OpenSearch/pull/11775)) - Bump `com.avast.gradle:gradle-docker-compose-plugin` from 0.16.12 to 0.17.6 ([#10163](https://github.com/opensearch-project/OpenSearch/pull/10163), [#11692](https://github.com/opensearch-project/OpenSearch/pull/11692)) - Bump `com.squareup.okhttp3:okhttp` from 4.11.0 to 4.12.0 ([#10861](https://github.com/opensearch-project/OpenSearch/pull/10861)) - Bump `org.apache.commons:commons-text` from 1.10.0 to 1.11.0 ([#11344](https://github.com/opensearch-project/OpenSearch/pull/11344)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 74d655cfb1045..58b54e7b77a1e 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -26,7 +26,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.13.0 -netty = 4.1.101.Final +netty = 4.1.104.Final joda = 2.12.2 # project reactor diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 deleted file mode 100644 index 7bb27bbfcb87d..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..30f215e47f8ad --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 deleted file mode 100644 index c792b4e834030..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9ed9b896d4b4e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 deleted file mode 100644 index 8bf31f6c3a2c7..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..478e7cfba1470 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 deleted file mode 100644 index 48b42bfab9287..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..f0242709f34f7 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4fe86799eaff..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..1b533eea3b3b3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 deleted file mode 100644 index cfc0befee237a..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..70777be4dc636 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4425d5ac8e5f..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 
b/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..d7c15af9312fe --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 deleted file mode 100644 index fd9199092452b..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..5cacaf11a29ce --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index 291c87ad7987b..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..64797bf11aedc --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 deleted file mode 100644 index 0789dea62778a..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..0232fc58f9357 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 deleted file mode 100644 index 48b42bfab9287..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..f0242709f34f7 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ 
+5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 deleted file mode 100644 index fb2d1aa25a5ec..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cbaac9e2c2d7b86e4c1bda220cde49949c9bcaee \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..3b6cd3524d978 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +5b126ceba61275f38297cacd5ea0cd6d3addee04 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 deleted file mode 100644 index ca030677c32d0..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c198ee61abba7ae6b3cb6b1033f04fb0ae79d512 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9d01e814971f2 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +50a2d899a8f8a68daed1a9b6d7750184310cc45f \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 deleted file mode 100644 index 450c34d975bb9..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..987b524aedc98 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index 291c87ad7987b..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..64797bf11aedc --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 
deleted file mode 100644 index 093ced04bee4b..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2761d380bc97e6ce64745dd9ca04538cedd40a5b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9110503f67304 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +d75246285e5fac6f6dad47e387ed4f46f36e521d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 deleted file mode 100644 index 7bb27bbfcb87d..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..30f215e47f8ad --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 deleted file mode 100644 index c792b4e834030..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9ed9b896d4b4e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 deleted file mode 100644 index 8bf31f6c3a2c7..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..478e7cfba1470 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 deleted file mode 100644 index 48b42bfab9287..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..f0242709f34f7 --- /dev/null +++ 
b/plugins/repository-s3/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4fe86799eaff..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..1b533eea3b3b3 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 deleted file mode 100644 index cfc0befee237a..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..70777be4dc636 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4425d5ac8e5f..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..d7c15af9312fe --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 deleted file mode 100644 index fd9199092452b..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..5cacaf11a29ce --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 deleted file mode 100644 index a710d964c1803..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.101.Final.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -f86303830ab65680e45cabb531e37078ecc6791e \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..522d85a3bf12e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +700fdbabab44709b0eccffe8f91c4226a5787356 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index 291c87ad7987b..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..64797bf11aedc --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 deleted file mode 100644 index 7bb27bbfcb87d..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..30f215e47f8ad --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 deleted file mode 100644 index c792b4e834030..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9ed9b896d4b4e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 deleted file mode 100644 index 8bf31f6c3a2c7..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..478e7cfba1470 --- /dev/null +++ 
b/plugins/transport-nio/licenses/netty-codec-http-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4fe86799eaff..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..1b533eea3b3b3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 deleted file mode 100644 index cfc0befee237a..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..70777be4dc636 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4425d5ac8e5f..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..d7c15af9312fe --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 deleted file mode 100644 index fd9199092452b..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..5cacaf11a29ce --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 deleted file mode 100644 index 7bb27bbfcb87d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 
+0,0 @@ -a4d94fd6cdf7a37b15237e32434afd6b955cc591 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..30f215e47f8ad --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +756797174b94a3aee11ce83522473f3c18287a43 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 deleted file mode 100644 index c792b4e834030..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -642523c57c4be15b8b461be7b1532262d193bbb3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..9ed9b896d4b4e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f51fcfd3baac88b2c0b8dc715932ad5622d17429 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 deleted file mode 100644 index 0789dea62778a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -24d0f4f2b0b0b55acc498d62f9c0921d0ea6da7b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..0232fc58f9357 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +8e237ce67ab230ed1ba749d6651b278333c21b3f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 deleted file mode 100644 index 8bf31f6c3a2c7..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c648f863b301e3fc62d0de098ec61be7628b8ea2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..478e7cfba1470 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +2db1556de1b8dc07695604bf51a0a133263ad63f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 deleted file mode 100644 index 48b42bfab9287..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -40590da6c615b852384542051330e9fd51d4c4b1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 
b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..f0242709f34f7 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +5bb757929f7c4d1bf12740a378a99643caaad1ac \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4fe86799eaff..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bdb16e4d308b5757123decc886896815d1daf7a3 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..1b533eea3b3b3 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +ec5fc4a7c5475eb20805e14f7274aa28872b5ba1 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 deleted file mode 100644 index cfc0befee237a..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7800aea949bf95f63df73166d144e49405b279dd \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..70777be4dc636 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +18c0e659950cdef5f12c36eccfa14cbd2ad2049d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 deleted file mode 100644 index f4425d5ac8e5f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e7eddfa4dfffcbd375d265d1fd08297df94f82f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..d7c15af9312fe --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +dfa4fe5c3a6eabb7af09902eb63266829876d8a2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 deleted file mode 100644 index 450c34d975bb9..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d3f503d7554cf25edc6b83b9b4724b240c6e91d5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..987b524aedc98 --- /dev/null +++ 
b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +f1210e5856fecb9182d58c0d33fa6e946b344b40 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 deleted file mode 100644 index fd9199092452b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b36cc2337b4a4135df7ee2f2d77431c3b541dc5 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..5cacaf11a29ce --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +da7b263b6fedc5add70e78ee8927c8bd2b9bb589 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 deleted file mode 100644 index 291c87ad7987b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.101.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0697ef1d71c6111a1b18a387fa985fd6398ace2 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 new file mode 100644 index 0000000000000..64797bf11aedc --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.104.Final.jar.sha1 @@ -0,0 +1 @@ +d4da9f7237ac3ac292891e0b2d5364acbce128cf \ No newline at end of file From 5f3d782ded6f38c65ec4c00f571e12a77779cf47 Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Fri, 5 Jan 2024 13:54:46 -0800 Subject: [PATCH 40/81] Update skip version to match branches (#11776) Signed-off-by: Harsha Vamsi Kalluri --- .../rest-api-spec/test/search/340_doc_values_field.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index f3281e35ac8e6..c7b00d5fbbef2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1,8 +1,8 @@ setup: - skip: features: [ "headers" ] - version: " - 2.99.99" - reason: "searching with only doc_values was added in 3.0.0" + version: " - 2.11.99" + reason: "searching with only doc_values was added in 2.12.0" --- "search on fields with both index and doc_values enabled": - do: From 89e47274d5d60d770d3b57161ca77509605d6898 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Mon, 8 Jan 2024 12:24:31 +0530 Subject: [PATCH 41/81] Skip test testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness (#11767) Signed-off-by: Rishab Nahata --- .../org/opensearch/cluster/allocation/AwarenessAllocationIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index c69718d982f8b..522d63b22a0da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -364,6 +364,7 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(noZoneNode), equalTo(2)); } + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/5908") public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws Exception { int nodeCountPerAZ = 5; int numOfShards = 30; From c7196da0d3a16abd319f5b066cc080dd8c306f99 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 09:47:41 -0500 Subject: [PATCH 42/81] Bump commons-io:commons-io from 2.14.0 to 2.15.1 in /plugins/repository-hdfs (#11796) * Bump commons-io:commons-io in /plugins/repository-hdfs Bumps commons-io:commons-io from 2.14.0 to 2.15.1. --- updated-dependencies: - dependency-name: commons-io:commons-io dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 2 +- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 140c4b07ab6e0..3f273f10596e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,7 +125,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) - Bump Lucene from 9.7.0 to 9.8.0 ([10276](https://github.com/opensearch-project/OpenSearch/pull/10276)) -- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560)) +- Bump `commons-io:commons-io` from 2.13.0 to 2.15.1 ([#10294](https://github.com/opensearch-project/OpenSearch/pull/10294), [#11001](https://github.com/opensearch-project/OpenSearch/pull/11001), [#11002](https://github.com/opensearch-project/OpenSearch/pull/11002), [#11446](https://github.com/opensearch-project/OpenSearch/pull/11446), [#11554](https://github.com/opensearch-project/OpenSearch/pull/11554), [#11560](https://github.com/opensearch-project/OpenSearch/pull/11560), [#11796](https://github.com/opensearch-project/OpenSearch/pull/11796)) - Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 
([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298)) - Bump `com.netflix.nebula.ospackage-base` from 11.4.0 to 11.6.0 ([#10295](https://github.com/opensearch-project/OpenSearch/pull/10295), [#11630](https://github.com/opensearch-project/OpenSearch/pull/11630)) - Bump `org.apache.zookeeper:zookeeper` from 3.9.0 to 3.9.1 ([#10506](https://github.com/opensearch-project/OpenSearch/pull/10506)) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index f04d42a2155d6..36843e3bc8700 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -75,7 +75,7 @@ dependencies { api 'commons-collections:commons-collections:3.2.2' api "org.apache.commons:commons-compress:${versions.commonscompress}" api 'org.apache.commons:commons-configuration2:2.9.0' - api 'commons-io:commons-io:2.14.0' + api 'commons-io:commons-io:2.15.1' api 'org.apache.commons:commons-lang3:3.14.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' diff --git a/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 deleted file mode 100644 index 33c5cfe53e01d..0000000000000 --- a/plugins/repository-hdfs/licenses/commons-io-2.14.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a4c6e1f6c196339473cd2e1b037f0eb97c62755b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 new file mode 100644 index 0000000000000..47c5d13812a36 --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-io-2.15.1.jar.sha1 @@ -0,0 +1 @@ +f11560da189ab563a5c8e351941415430e9304ea \ No newline at end of file From fe98aad152b85e6e90265b99ccf00e55af296088 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Mon, 8 Jan 2024 11:45:07 -0800 Subject: [PATCH 43/81] Use SegmentReplicationTarget test to validate non-active on-disk files are reused for replication (#11786) * Use SegmentReplicationTarget test to validate non-active on-disk files are reused for replication Signed-off-by: Suraj Singh * Fix original shard level unit test Signed-off-by: Suraj Singh --------- Signed-off-by: Suraj Singh --- .../index/shard/RemoteIndexShardTests.java | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index dd92bfb47afdb..eacc504428ef1 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; +import org.opensearch.action.StepListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; @@ -371,7 +372,6 @@ public void testPrimaryRestart() throws Exception { * prevent FileAlreadyExistsException. 
It does so by only copying files in first round of segment replication without * committing locally so that in next round of segment replication those files are not considered for download again */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); @@ -388,6 +388,7 @@ public void testSegRepSucceedsOnPreviousCopiedFiles() throws Exception { ); CountDownLatch latch = new CountDownLatch(1); + logger.info("--> Starting first round of replication"); // Start first round of segment replication. This should fail with simulated error but with replica having // files in its local store but not in active reader. final SegmentReplicationTarget target = targetService.startReplication( @@ -427,6 +428,7 @@ public void onReplicationFailure( // Start next round of segment replication and not throwing exception resulting in commit on replica when(sourceFactory.get(any())).thenReturn(getRemoteStoreReplicationSource(replica, () -> {})); CountDownLatch waitForSecondRound = new CountDownLatch(1); + logger.info("--> Starting second round of replication"); final SegmentReplicationTarget newTarget = targetService.startReplication( replica, primary.getLatestReplicationCheckpoint(), @@ -560,8 +562,19 @@ public void getSegmentFiles( BiConsumer fileProgressTracker, ActionListener listener ) { - super.getSegmentFiles(replicationId, checkpoint, filesToFetch, indexShard, (fileName, bytesRecovered) -> {}, listener); - postGetFilesRunnable.run(); + StepListener waitForCopyFilesListener = new StepListener(); + super.getSegmentFiles( + replicationId, + checkpoint, + filesToFetch, + indexShard, + (fileName, bytesRecovered) -> {}, + waitForCopyFilesListener + ); + waitForCopyFilesListener.whenComplete(response -> { + postGetFilesRunnable.run(); + listener.onResponse(response); + }, listener::onFailure); } @Override From 97ccb5cbe47b3caab86249574def19b5448846bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 12:34:04 -0800 Subject: [PATCH 44/81] Bump net.java.dev.jna:jna from 5.13.0 to 5.14.0 (#11798) * Bump net.java.dev.jna:jna from 5.13.0 to 5.14.0 Bumps [net.java.dev.jna:jna](https://github.com/java-native-access/jna) from 5.13.0 to 5.14.0. - [Changelog](https://github.com/java-native-access/jna/blob/master/CHANGES.md) - [Commits](https://github.com/java-native-access/jna/compare/5.13.0...5.14.0) --- updated-dependencies: - dependency-name: net.java.dev.jna:jna dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f273f10596e6..8fbdbf6000f87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -159,6 +159,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.maxmind.geoip2:geoip2` from 4.1.0 to 4.2.0 ([#11559](https://github.com/opensearch-project/OpenSearch/pull/11559)) - Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) - Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) +- Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index a42976fef572c..3c846b48549fb 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -110,7 +110,7 @@ dependencies { api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' api 'commons-io:commons-io:2.15.1' - api "net.java.dev.jna:jna:5.13.0" + api "net.java.dev.jna:jna:5.14.0" api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" From 0b30f306e2a67cd87b4c20c8bfc06853b9e9f772 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Mon, 8 Jan 2024 12:51:24 -0800 Subject: [PATCH 45/81] Update dropdowns in issue templates to have no option selected by default (#11805) This change updates the bug and feature templates to set the first option as empty. This prevents form submission until an option is selected. Signed-off-by: Marc Handalian --- .github/ISSUE_TEMPLATE/bug_template.yml | 3 ++- .github/ISSUE_TEMPLATE/feature_request.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_template.yml b/.github/ISSUE_TEMPLATE/bug_template.yml index 2cd1ee8a7e688..5f0798abe0f68 100644 --- a/.github/ISSUE_TEMPLATE/bug_template.yml +++ b/.github/ISSUE_TEMPLATE/bug_template.yml @@ -15,7 +15,7 @@ body: description: Choose a specific OpenSearch component your bug belongs to. If you are unsure which to select or if the component is not present, select "Other". multiple: false options: - - Other + - # Empty first option to force selection - Build - Clients - Cluster Manager @@ -24,6 +24,7 @@ body: - Indexing:Replication - Indexing - Libraries + - Other - Plugins - Search:Aggregations - Search:Performance diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index d93ac8b590706..0159e771f7f80 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -22,7 +22,7 @@ body: description: Choose a specific OpenSearch component your feature request belongs to. If you are unsure of which component to select or if the component is not present, select "Other". 
multiple: false options: - - Other + - # Empty first option to force selection - Build - Clients - Cluster Manager @@ -31,6 +31,7 @@ body: - Indexing:Replication - Indexing - Libraries + - Other - Plugins - Search:Aggregations - Search:Performance From e948c4037f61117d1a16aa058ea7b5b2ea3b6817 Mon Sep 17 00:00:00 2001 From: Siddhant Deshmukh Date: Mon, 8 Jan 2024 13:35:41 -0800 Subject: [PATCH 46/81] Capture query categorization metrics for additional query types and aggregations types (#11582) Signed-off-by: Siddhant Deshmukh --- CHANGELOG.md | 1 + .../SearchQueryAggregationCategorizer.java | 55 ++++++ .../action/search/SearchQueryCategorizer.java | 10 +- .../SearchQueryCategorizingVisitor.java | 38 +--- .../action/search/SearchQueryCounters.java | 185 +++++++++++++++++- .../query/FieldMaskingSpanQueryBuilder.java | 4 +- .../search/SearchQueryCategorizerTests.java | 60 +++--- 7 files changed, 286 insertions(+), 67 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fbdbf6000f87..19bcd75347444 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -185,6 +185,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Automatically add scheme to discovery.ec2.endpoint ([#11512](https://github.com/opensearch-project/OpenSearch/pull/11512)) - Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) - Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) +- Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) ### Deprecated diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java new file mode 100644 index 0000000000000..607ccf182851b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryAggregationCategorizer.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.PipelineAggregationBuilder; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.Collection; + +/** + * Increments the counters related to Aggregation Search Queries. 
+ */ +public class SearchQueryAggregationCategorizer { + + private static final String TYPE_TAG = "type"; + private final SearchQueryCounters searchQueryCounters; + + public SearchQueryAggregationCategorizer(SearchQueryCounters searchQueryCounters) { + this.searchQueryCounters = searchQueryCounters; + } + + public void incrementSearchQueryAggregationCounters(Collection aggregatorFactories) { + for (AggregationBuilder aggregationBuilder : aggregatorFactories) { + incrementCountersRecursively(aggregationBuilder); + } + } + + private void incrementCountersRecursively(AggregationBuilder aggregationBuilder) { + // Increment counters for the current aggregation + String aggregationType = aggregationBuilder.getType(); + searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, aggregationType)); + + // Recursively process sub-aggregations if any + Collection subAggregations = aggregationBuilder.getSubAggregations(); + if (subAggregations != null && !subAggregations.isEmpty()) { + for (AggregationBuilder subAggregation : subAggregations) { + incrementCountersRecursively(subAggregation); + } + } + + // Process pipeline aggregations + Collection pipelineAggregations = aggregationBuilder.getPipelineAggregations(); + for (PipelineAggregationBuilder pipelineAggregation : pipelineAggregations) { + String pipelineAggregationType = pipelineAggregation.getType(); + searchQueryCounters.aggCounter.add(1, Tags.create().addTag(TYPE_TAG, pipelineAggregationType)); + } + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java index 8fe1be610f9af..ffaae5b08772f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizer.java @@ -32,13 +32,15 @@ final class SearchQueryCategorizer { final SearchQueryCounters searchQueryCounters; + final SearchQueryAggregationCategorizer searchQueryAggregationCategorizer; + public SearchQueryCategorizer(MetricsRegistry metricsRegistry) { searchQueryCounters = new SearchQueryCounters(metricsRegistry); + searchQueryAggregationCategorizer = new SearchQueryAggregationCategorizer(searchQueryCounters); } public void categorize(SearchSourceBuilder source) { QueryBuilder topLevelQueryBuilder = source.query(); - logQueryShape(topLevelQueryBuilder); incrementQueryTypeCounters(topLevelQueryBuilder); incrementQueryAggregationCounters(source.aggregations()); @@ -56,9 +58,11 @@ private void incrementQuerySortCounters(List> sorts) { } private void incrementQueryAggregationCounters(AggregatorFactories.Builder aggregations) { - if (aggregations != null) { - searchQueryCounters.aggCounter.add(1); + if (aggregations == null) { + return; } + + searchQueryAggregationCategorizer.incrementSearchQueryAggregationCounters(aggregations.getAggregatorFactories()); } private void incrementQueryTypeCounters(QueryBuilder topLevelQueryBuilder) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java index 98f0169e69a5c..31f83dbef9dc9 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCategorizingVisitor.java @@ -9,26 +9,14 @@ package org.opensearch.action.search; import org.apache.lucene.search.BooleanClause; -import 
org.opensearch.index.query.BoolQueryBuilder; -import org.opensearch.index.query.MatchPhraseQueryBuilder; -import org.opensearch.index.query.MatchQueryBuilder; -import org.opensearch.index.query.MultiMatchQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilderVisitor; -import org.opensearch.index.query.QueryStringQueryBuilder; -import org.opensearch.index.query.RangeQueryBuilder; -import org.opensearch.index.query.RegexpQueryBuilder; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.index.query.WildcardQueryBuilder; -import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; -import org.opensearch.telemetry.metrics.tags.Tags; /** - * Class to visit the querybuilder tree and also track the level information. + * Class to visit the query builder tree and also track the level information. * Increments the counters related to Search Query type. */ final class SearchQueryCategorizingVisitor implements QueryBuilderVisitor { - private static final String LEVEL_TAG = "level"; private final int level; private final SearchQueryCounters searchQueryCounters; @@ -42,29 +30,7 @@ private SearchQueryCategorizingVisitor(SearchQueryCounters counters, int level) } public void accept(QueryBuilder qb) { - if (qb instanceof BoolQueryBuilder) { - searchQueryCounters.boolCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof FunctionScoreQueryBuilder) { - searchQueryCounters.functionScoreCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof MatchQueryBuilder) { - searchQueryCounters.matchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof MatchPhraseQueryBuilder) { - searchQueryCounters.matchPhrasePrefixCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof MultiMatchQueryBuilder) { - searchQueryCounters.multiMatchCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof QueryStringQueryBuilder) { - searchQueryCounters.queryStringQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof RangeQueryBuilder) { - searchQueryCounters.rangeCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof RegexpQueryBuilder) { - searchQueryCounters.regexCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof TermQueryBuilder) { - searchQueryCounters.termCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else if (qb instanceof WildcardQueryBuilder) { - searchQueryCounters.wildcardCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } else { - searchQueryCounters.otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); - } + searchQueryCounters.incrementCounter(qb, level); } public QueryBuilderVisitor getChildVisitor(BooleanClause.Occur occur) { diff --git a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java index 7e0259af07701..bbb883809b41b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java +++ b/server/src/main/java/org/opensearch/action/search/SearchQueryCounters.java @@ -8,33 +8,82 @@ package org.opensearch.action.search; +import org.opensearch.index.query.BoolQueryBuilder; +import org.opensearch.index.query.BoostingQueryBuilder; +import org.opensearch.index.query.ConstantScoreQueryBuilder; +import org.opensearch.index.query.DisMaxQueryBuilder; +import 
org.opensearch.index.query.DistanceFeatureQueryBuilder; +import org.opensearch.index.query.ExistsQueryBuilder; +import org.opensearch.index.query.FieldMaskingSpanQueryBuilder; +import org.opensearch.index.query.FuzzyQueryBuilder; +import org.opensearch.index.query.GeoBoundingBoxQueryBuilder; +import org.opensearch.index.query.GeoDistanceQueryBuilder; +import org.opensearch.index.query.GeoPolygonQueryBuilder; +import org.opensearch.index.query.GeoShapeQueryBuilder; +import org.opensearch.index.query.IntervalQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.MatchPhraseQueryBuilder; +import org.opensearch.index.query.MatchQueryBuilder; +import org.opensearch.index.query.MultiMatchQueryBuilder; +import org.opensearch.index.query.PrefixQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryStringQueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.RegexpQueryBuilder; +import org.opensearch.index.query.ScriptQueryBuilder; +import org.opensearch.index.query.SimpleQueryStringBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.WildcardQueryBuilder; +import org.opensearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.opensearch.telemetry.metrics.Counter; import org.opensearch.telemetry.metrics.MetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; + +import java.util.HashMap; +import java.util.Map; /** * Class contains all the Counters related to search query types. */ final class SearchQueryCounters { + private static final String LEVEL_TAG = "level"; private static final String UNIT = "1"; private final MetricsRegistry metricsRegistry; // Counters related to Query types public final Counter aggCounter; public final Counter boolCounter; + public final Counter boostingCounter; + public final Counter constantScoreCounter; + public final Counter disMaxCounter; + public final Counter distanceFeatureCounter; + public final Counter existsCounter; + public final Counter fieldMaskingSpanCounter; public final Counter functionScoreCounter; + public final Counter fuzzyCounter; + public final Counter geoBoundingBoxCounter; + public final Counter geoDistanceCounter; + public final Counter geoPolygonCounter; + public final Counter geoShapeCounter; + public final Counter intervalCounter; public final Counter matchCounter; + public final Counter matchallCounter; public final Counter matchPhrasePrefixCounter; public final Counter multiMatchCounter; public final Counter otherQueryCounter; - public final Counter queryStringQueryCounter; + public final Counter prefixCounter; + public final Counter queryStringCounter; public final Counter rangeCounter; - public final Counter regexCounter; - + public final Counter regexpCounter; + public final Counter scriptCounter; + public final Counter simpleQueryStringCounter; public final Counter sortCounter; public final Counter skippedCounter; public final Counter termCounter; public final Counter totalCounter; public final Counter wildcardCounter; + public final Counter numberOfInputFieldsCounter; + private final Map, Counter> queryHandlers; public SearchQueryCounters(MetricsRegistry metricsRegistry) { this.metricsRegistry = metricsRegistry; @@ -48,16 +97,81 @@ public SearchQueryCounters(MetricsRegistry metricsRegistry) { "Counter for the number of top level and nested bool search queries", UNIT ); + this.boostingCounter = 
metricsRegistry.createCounter( + "search.query.type.boost.count", + "Counter for the number of top level and nested boost search queries", + UNIT + ); + this.constantScoreCounter = metricsRegistry.createCounter( + "search.query.type.counstantscore.count", + "Counter for the number of top level and nested constant score search queries", + UNIT + ); + this.disMaxCounter = metricsRegistry.createCounter( + "search.query.type.dismax.count", + "Counter for the number of top level and nested disjuntion max search queries", + UNIT + ); + this.distanceFeatureCounter = metricsRegistry.createCounter( + "search.query.type.distancefeature.count", + "Counter for the number of top level and nested distance feature search queries", + UNIT + ); + this.existsCounter = metricsRegistry.createCounter( + "search.query.type.exists.count", + "Counter for the number of top level and nested exists search queries", + UNIT + ); + this.fieldMaskingSpanCounter = metricsRegistry.createCounter( + "search.query.type.fieldmaskingspan.count", + "Counter for the number of top level and nested field masking span search queries", + UNIT + ); this.functionScoreCounter = metricsRegistry.createCounter( "search.query.type.functionscore.count", "Counter for the number of top level and nested function score search queries", UNIT ); + this.fuzzyCounter = metricsRegistry.createCounter( + "search.query.type.fuzzy.count", + "Counter for the number of top level and nested fuzzy search queries", + UNIT + ); + this.geoBoundingBoxCounter = metricsRegistry.createCounter( + "search.query.type.geoboundingbox.count", + "Counter for the number of top level and nested geo bounding box queries", + UNIT + ); + this.geoDistanceCounter = metricsRegistry.createCounter( + "search.query.type.geodistance.count", + "Counter for the number of top level and nested geo distance queries", + UNIT + ); + this.geoPolygonCounter = metricsRegistry.createCounter( + "search.query.type.geopolygon.count", + "Counter for the number of top level and nested geo polygon queries", + UNIT + ); + this.geoShapeCounter = metricsRegistry.createCounter( + "search.query.type.geoshape.count", + "Counter for the number of top level and nested geo shape queries", + UNIT + ); + this.intervalCounter = metricsRegistry.createCounter( + "search.query.type.interval.count", + "Counter for the number of top level and nested interval queries", + UNIT + ); this.matchCounter = metricsRegistry.createCounter( "search.query.type.match.count", "Counter for the number of top level and nested match search queries", UNIT ); + this.matchallCounter = metricsRegistry.createCounter( + "search.query.type.matchall.count", + "Counter for the number of top level and nested match all search queries", + UNIT + ); this.matchPhrasePrefixCounter = metricsRegistry.createCounter( "search.query.type.matchphrase.count", "Counter for the number of top level and nested match phrase prefix search queries", @@ -73,7 +187,12 @@ public SearchQueryCounters(MetricsRegistry metricsRegistry) { "Counter for the number of top level and nested search queries that do not match any other categories", UNIT ); - this.queryStringQueryCounter = metricsRegistry.createCounter( + this.prefixCounter = metricsRegistry.createCounter( + "search.query.type.prefix.count", + "Counter for the number of top level and nested search queries that match prefix queries", + UNIT + ); + this.queryStringCounter = metricsRegistry.createCounter( "search.query.type.querystringquery.count", "Counter for the number of top level and nested queryStringQuery 
search queries", UNIT @@ -83,11 +202,21 @@ public SearchQueryCounters(MetricsRegistry metricsRegistry) { "Counter for the number of top level and nested range search queries", UNIT ); - this.regexCounter = metricsRegistry.createCounter( + this.regexpCounter = metricsRegistry.createCounter( "search.query.type.regex.count", "Counter for the number of top level and nested regex search queries", UNIT ); + this.scriptCounter = metricsRegistry.createCounter( + "search.query.type.script.count", + "Counter for the number of top level and nested script search queries", + UNIT + ); + this.simpleQueryStringCounter = metricsRegistry.createCounter( + "search.query.type.simplequerystring.count", + "Counter for the number of top level and nested script simple query string search queries", + UNIT + ); this.skippedCounter = metricsRegistry.createCounter( "search.query.type.skipped.count", "Counter for the number queries skipped due to error", @@ -113,5 +242,51 @@ public SearchQueryCounters(MetricsRegistry metricsRegistry) { "Counter for the number of top level and nested wildcard search queries", UNIT ); + this.numberOfInputFieldsCounter = metricsRegistry.createCounter( + "search.query.type.numberofinputfields.count", + "Counter for the number of input fields in the search queries", + UNIT + ); + this.queryHandlers = new HashMap<>(); + initializeQueryHandlers(); + } + + public void incrementCounter(QueryBuilder queryBuilder, int level) { + Counter counter = queryHandlers.get(queryBuilder.getClass()); + if (counter != null) { + counter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } else { + otherQueryCounter.add(1, Tags.create().addTag(LEVEL_TAG, level)); + } + } + + private void initializeQueryHandlers() { + + queryHandlers.put(BoolQueryBuilder.class, boolCounter); + queryHandlers.put(FunctionScoreQueryBuilder.class, functionScoreCounter); + queryHandlers.put(MatchQueryBuilder.class, matchCounter); + queryHandlers.put(MatchPhraseQueryBuilder.class, matchPhrasePrefixCounter); + queryHandlers.put(MultiMatchQueryBuilder.class, multiMatchCounter); + queryHandlers.put(QueryStringQueryBuilder.class, queryStringCounter); + queryHandlers.put(RangeQueryBuilder.class, rangeCounter); + queryHandlers.put(RegexpQueryBuilder.class, regexpCounter); + queryHandlers.put(TermQueryBuilder.class, termCounter); + queryHandlers.put(WildcardQueryBuilder.class, wildcardCounter); + queryHandlers.put(BoostingQueryBuilder.class, boostingCounter); + queryHandlers.put(ConstantScoreQueryBuilder.class, constantScoreCounter); + queryHandlers.put(DisMaxQueryBuilder.class, disMaxCounter); + queryHandlers.put(DistanceFeatureQueryBuilder.class, distanceFeatureCounter); + queryHandlers.put(ExistsQueryBuilder.class, existsCounter); + queryHandlers.put(FieldMaskingSpanQueryBuilder.class, fieldMaskingSpanCounter); + queryHandlers.put(FuzzyQueryBuilder.class, fuzzyCounter); + queryHandlers.put(GeoBoundingBoxQueryBuilder.class, geoBoundingBoxCounter); + queryHandlers.put(GeoDistanceQueryBuilder.class, geoDistanceCounter); + queryHandlers.put(GeoPolygonQueryBuilder.class, geoPolygonCounter); + queryHandlers.put(GeoShapeQueryBuilder.class, geoShapeCounter); + queryHandlers.put(IntervalQueryBuilder.class, intervalCounter); + queryHandlers.put(MatchAllQueryBuilder.class, matchallCounter); + queryHandlers.put(PrefixQueryBuilder.class, prefixCounter); + queryHandlers.put(ScriptQueryBuilder.class, scriptCounter); + queryHandlers.put(SimpleQueryStringBuilder.class, simpleQueryStringCounter); } } diff --git 
a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java index 1162689a54689..4e73d87b07b7a 100644 --- a/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -54,7 +54,9 @@ * @opensearch.internal */ public class FieldMaskingSpanQueryBuilder extends AbstractQueryBuilder implements SpanQueryBuilder { - public static final ParseField SPAN_FIELD_MASKING_FIELD = new ParseField("span_field_masking", "field_masking_span"); + + public static final String NAME = "span_field_masking"; + public static final ParseField SPAN_FIELD_MASKING_FIELD = new ParseField(NAME, "field_masking_span"); private static final ParseField FIELD_FIELD = new ParseField("field"); private static final ParseField QUERY_FIELD = new ParseField("query"); diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java index a2e301143d694..17fa124890158 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryCategorizerTests.java @@ -34,16 +34,19 @@ import java.util.Arrays; -import org.mockito.Mockito; +import org.mockito.ArgumentCaptor; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public final class SearchQueryCategorizerTests extends OpenSearchTestCase { + private static final String MULTI_TERMS_AGGREGATION = "multi_terms"; + private MetricsRegistry metricsRegistry; private SearchQueryCategorizer searchQueryCategorizer; @@ -71,7 +74,20 @@ public void testAggregationsQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d), any(Tags.class)); + + // capture the arguments passed to the aggCounter.add method + ArgumentCaptor valueCaptor = ArgumentCaptor.forClass(Double.class); + ArgumentCaptor tagsCaptor = ArgumentCaptor.forClass(Tags.class); + + // Verify that aggCounter.add was called with the expected arguments + verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(valueCaptor.capture(), tagsCaptor.capture()); + + double actualValue = valueCaptor.getValue(); + String actualTag = (String) tagsCaptor.getValue().getTagsMap().get("type"); + + assertEquals(1.0d, actualValue, 0.0001); + assertEquals(MULTI_TERMS_AGGREGATION, actualTag); } public void testBoolQuery() { @@ -81,8 +97,8 @@ public void testBoolQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); } public void testFunctionScoreQuery() { @@ -92,7 +108,7 @@ public void testFunctionScoreQuery() { 
searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.functionScoreCounter).add(eq(1.0d), any(Tags.class)); } public void testMatchQuery() { @@ -102,7 +118,7 @@ public void testMatchQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); } public void testMatchPhraseQuery() { @@ -112,7 +128,7 @@ public void testMatchPhraseQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.matchPhrasePrefixCounter).add(eq(1.0d), any(Tags.class)); } public void testMultiMatchQuery() { @@ -122,7 +138,7 @@ public void testMultiMatchQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.multiMatchCounter).add(eq(1.0d), any(Tags.class)); } public void testOtherQuery() { @@ -136,8 +152,8 @@ public void testOtherQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(2)).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.otherQueryCounter, times(1)).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); } public void testQueryStringQuery() { @@ -148,7 +164,7 @@ public void testQueryStringQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.queryStringQueryCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.queryStringCounter).add(eq(1.0d), any(Tags.class)); } public void testRangeQuery() { @@ -160,7 +176,7 @@ public void testRangeQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.rangeCounter).add(eq(1.0d), any(Tags.class)); } public void testRegexQuery() { @@ -169,7 +185,7 @@ public void testRegexQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.regexpCounter).add(eq(1.0d), any(Tags.class)); } public void testSortQuery() { @@ -180,8 +196,8 @@ public void testSortQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.sortCounter, times(2)).add(eq(1.0d), any(Tags.class)); } public void testTermQuery() { @@ -191,7 
+207,7 @@ public void testTermQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); } public void testWildcardQuery() { @@ -201,7 +217,7 @@ public void testWildcardQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.wildcardCounter).add(eq(1.0d), any(Tags.class)); } public void testComplexQuery() { @@ -219,10 +235,10 @@ public void testComplexQuery() { searchQueryCategorizer.categorize(sourceBuilder); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.regexCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); - Mockito.verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d)); + verify(searchQueryCategorizer.searchQueryCounters.termCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.matchCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.regexpCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.boolCounter).add(eq(1.0d), any(Tags.class)); + verify(searchQueryCategorizer.searchQueryCounters.aggCounter).add(eq(1.0d), any(Tags.class)); } } From 2de44a7b771c9b8b59f57069d0fdfdf9ee818ec2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 15:28:32 -0800 Subject: [PATCH 47/81] Bump lycheeverse/lychee-action from 1.8.0 to 1.9.0 (#11795) * Bump lycheeverse/lychee-action from 1.8.0 to 1.9.0 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 1.8.0 to 1.9.0. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.8.0...v1.9.0) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/links.yml | 2 +- CHANGELOG.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index ca026f530b4af..2714d45bd108f 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.8.0 + uses: lycheeverse/lychee-action@v1.9.0 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index 19bcd75347444..c0d39f1724353 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -160,6 +160,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) - Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) - Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) +- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.0 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) From a1e4602b8dcd5676d1e7b410d86242d7a555d619 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 9 Jan 2024 22:11:04 +0800 Subject: [PATCH 48/81] Enable must_exist parameter for update aliases API (#11210) * Enable must_exist parameter for update aliases API Signed-off-by: Gao Binlong * modify changelog Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Remove silently when all aliases are non-existing and must_exist is false Signed-off-by: Gao Binlong * Modify some comments to run gradle check again Signed-off-by: Gao Binlong * Add more yaml test Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../40_remove_with_must_exist.yml | 141 ++++++++++++++++++ .../indices/alias/IndicesAliasesRequest.java | 14 +- .../alias/TransportIndicesAliasesAction.java | 25 +++- .../cluster/metadata/AliasAction.java | 5 +- .../indices/alias/AliasActionsTests.java | 7 + .../MetadataIndexAliasesServiceTests.java | 7 +- .../alias/RandomAliasActionsGenerator.java | 5 + 8 files changed, 196 insertions(+), 9 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index c0d39f1724353..45ab53b0d4246 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -107,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - [Streaming Indexing] Introduce new experimental server HTTP transport based on Netty 4 and Project Reactor (Reactor Netty) 
([#9672](https://github.com/opensearch-project/OpenSearch/pull/9672)) +- Enable must_exist parameter for update aliases API ([#11210](https://github.com/opensearch-project/OpenSearch/pull/11210)) - Add back half_float BKD based sort query optimization ([#11024](https://github.com/opensearch-project/OpenSearch/pull/11024)) - Request level coordinator slow logs ([#10650](https://github.com/opensearch-project/OpenSearch/pull/10650)) - Add template snippets support for field and target_field in KV ingest processor ([#10040](https://github.com/opensearch-project/OpenSearch/pull/10040)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml new file mode 100644 index 0000000000000..dbf6a4fad3295 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml @@ -0,0 +1,141 @@ +--- +"Throw aliases missing exception when removing non-existing alias with setting must_exist to true": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + catch: /aliases \[test_alias\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + alias: test_alias + must_exist: true + + - do: + catch: /aliases \[testAlias\*\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [ testAlias* ] + must_exist: true + + - do: + catch: /\[aliases\] can't be empty/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [] + must_exist: true + +--- +"Throw aliases missing exception when all of the specified aliases are non-existing": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + catch: /aliases \[test\_alias\] missing/ + indices.update_aliases: + body: + actions: + - remove: + index: test_index + alias: test_alias + + - do: + catch: /aliases \[test\_alias\*\] missing/ + indices.update_aliases: + body: + actions: + - remove: + indices: [ test_index ] + aliases: [ test_alias* ] + +--- +"Remove successfully when some specified aliases are non-existing": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + indices.update_aliases: + body: + actions: + - add: + indices: [ test_index ] + aliases: [ test_alias ] + + - do: + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [test_alias, test_alias1, test_alias2] + must_exist: false + + - match: { acknowledged: true } + +--- +"Remove silently when all of the specified aliases are non-existing and must_exist is false": + - skip: + version: " - 2.99.99" + reason: "introduced in 3.0" + + - do: + indices.create: + index: test_index + + - do: + indices.exists_alias: + name: test_alias + + - is_false: '' + + - do: + indices.update_aliases: + body: + actions: + - remove: + index: test_index + aliases: [test_alias, test_alias1, test_alias2] + must_exist: false + + - match: { acknowledged: true } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java 
b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index bc27c70282368..6ce62dda32d0a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -220,9 +220,11 @@ private static ObjectParser parser(String name, Supplier REMOVE_PARSER = parser(REMOVE.getPreferredName(), AliasActions::remove); + static { + REMOVE_PARSER.declareField(AliasActions::mustExist, XContentParser::booleanValue, MUST_EXIST, ValueType.BOOLEAN); + } private static final ObjectParser REMOVE_INDEX_PARSER = parser( REMOVE_INDEX.getPreferredName(), AliasActions::removeIndex @@ -554,6 +556,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (null != isHidden) { builder.field(IS_HIDDEN.getPreferredName(), isHidden); } + if (null != mustExist) { + builder.field(MUST_EXIST.getPreferredName(), mustExist); + } builder.endObject(); builder.endObject(); return builder; @@ -582,6 +587,8 @@ public String toString() { + searchRouting + ",writeIndex=" + writeIndex + + ",mustExist=" + + mustExist + "]"; } @@ -600,12 +607,13 @@ public boolean equals(Object obj) { && Objects.equals(indexRouting, other.indexRouting) && Objects.equals(searchRouting, other.searchRouting) && Objects.equals(writeIndex, other.writeIndex) - && Objects.equals(isHidden, other.isHidden); + && Objects.equals(isHidden, other.isHidden) + && Objects.equals(mustExist, other.mustExist); } @Override public int hashCode() { - return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting, writeIndex, isHidden); + return Objects.hash(type, indices, aliases, filter, routing, indexRouting, searchRouting, writeIndex, isHidden, mustExist); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 769044f3dafb7..81cb3102cfcb9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -50,6 +50,7 @@ import org.opensearch.cluster.metadata.MetadataIndexAliasesService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; +import org.opensearch.common.regex.Regex; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.Index; @@ -59,6 +60,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -220,12 +222,33 @@ private static String[] concreteAliases(IndicesAliasesRequest.AliasActions actio // for DELETE we expand the aliases String[] indexAsArray = { concreteIndex }; final Map> aliasMetadata = metadata.findAliases(action, indexAsArray); - List finalAliases = new ArrayList<>(); + Set finalAliases = new HashSet<>(); for (final List curAliases : aliasMetadata.values()) { for (AliasMetadata aliasMeta : curAliases) { finalAliases.add(aliasMeta.alias()); } } + + // must_exist can only be set in the Remove Action in Update aliases API, + // we check the value here to make the behavior consistent with Delete aliases API + if (action.mustExist() != null) { + // if must_exist is false, we should 
make the remove action execute silently, + // so we return the original specified aliases to avoid AliasesNotFoundException + if (!action.mustExist()) { + return action.aliases(); + } + + // if there is any non-existing aliases specified in the request and must_exist is true, throw exception in advance + if (finalAliases.isEmpty()) { + throw new AliasesNotFoundException(action.aliases()); + } + String[] nonExistingAliases = Arrays.stream(action.aliases()) + .filter(originalAlias -> finalAliases.stream().noneMatch(finalAlias -> Regex.simpleMatch(originalAlias, finalAlias))) + .toArray(String[]::new); + if (nonExistingAliases.length != 0) { + throw new AliasesNotFoundException(nonExistingAliases); + } + } return finalAliases.toArray(new String[0]); } else { // for ADD and REMOVE_INDEX we just return the current aliases diff --git a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java index 47c6cad04343e..dac6ecaa13781 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/AliasAction.java @@ -35,6 +35,7 @@ import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; +import org.opensearch.rest.action.admin.indices.AliasesNotFoundException; /** * Individual operation to perform on the cluster state as part of an {@link IndicesAliasesRequest}. @@ -225,8 +226,8 @@ boolean removeIndex() { @Override boolean apply(NewAliasValidator aliasValidator, Metadata.Builder metadata, IndexMetadata index) { if (false == index.getAliases().containsKey(alias)) { - if (mustExist != null && mustExist.booleanValue()) { - throw new IllegalArgumentException("required alias [" + alias + "] does not exist"); + if (mustExist != null && mustExist) { + throw new AliasesNotFoundException(alias); } return false; } diff --git a/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java b/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java index 8ba8d226715ed..d463782a70506 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/alias/AliasActionsTests.java @@ -241,6 +241,7 @@ public void testParseRemove() throws IOException { String[] indices = generateRandomStringArray(10, 5, false, false); String[] aliases = generateRandomStringArray(10, 5, false, false); XContentBuilder b = XContentBuilder.builder(randomFrom(XContentType.values()).xContent()); + boolean mustExist = randomBoolean(); b.startObject(); { b.startObject("remove"); @@ -255,6 +256,9 @@ public void testParseRemove() throws IOException { } else { b.field("alias", aliases[0]); } + if (mustExist) { + b.field("must_exist", true); + } } b.endObject(); } @@ -265,6 +269,9 @@ public void testParseRemove() throws IOException { assertEquals(AliasActions.Type.REMOVE, action.actionType()); assertThat(action.indices(), equalTo(indices)); assertThat(action.aliases(), equalTo(aliases)); + if (mustExist) { + assertThat(action.mustExist(), equalTo(true)); + } } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index bf66f577e182b..9fb4551f106ec 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.util.set.Sets; import org.opensearch.core.index.Index; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.rest.action.admin.indices.AliasesNotFoundException; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; @@ -164,11 +165,11 @@ public void testMustExist() { // Show that removing non-existing alias with mustExist == true fails final ClusterState finalCS = after; - final IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, + final AliasesNotFoundException iae = expectThrows( + AliasesNotFoundException.class, () -> service.applyAliasActions(finalCS, singletonList(new AliasAction.Remove(index, "test_2", true))) ); - assertThat(iae.getMessage(), containsString("required alias [test_2] does not exist")); + assertThat(iae.getMessage(), containsString("aliases [test_2] missing")); } public void testMultipleIndices() { diff --git a/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java b/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java index ce862d2b69d8f..fb9db9a578b4a 100644 --- a/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java +++ b/test/framework/src/main/java/org/opensearch/index/alias/RandomAliasActionsGenerator.java @@ -102,6 +102,11 @@ public static AliasActions randomAliasAction(boolean useStringAsFilter) { action.isHidden(randomBoolean()); } } + if (action.actionType() == AliasActions.Type.REMOVE) { + if (randomBoolean()) { + action.mustExist(randomBoolean()); + } + } return action; } From 3f5ec64bc296d1bd4e27d66ae14e9f65a1f29d81 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 9 Jan 2024 10:19:21 -0600 Subject: [PATCH 49/81] Bump peternied/required-approval from v1.2 to v1.3 (#11820) * Switched to more reliable OpenSearch Lucene snapshot location - Related https://github.com/opensearch-project/opensearch-build/issues/3874 Signed-off-by: Peter Nied Signed-off-by: Peter Nied * Changelog entry Signed-off-by: Peter Nied Signed-off-by: Peter Nied * Use required-approval@main * Trigger on any review Signed-off-by: Peter Nied * Snap to version v1.3 Signed-off-by: Peter Nied --------- Signed-off-by: Peter Nied Signed-off-by: Peter Nied --- .github/workflows/maintainer-approval.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/maintainer-approval.yml b/.github/workflows/maintainer-approval.yml index 2f87afd372d90..34e8f57cc1878 100644 --- a/.github/workflows/maintainer-approval.yml +++ b/.github/workflows/maintainer-approval.yml @@ -2,7 +2,6 @@ name: Maintainers approval on: pull_request_review: - types: [submitted] jobs: maintainer-approved-check: @@ -26,7 +25,7 @@ jobs: return maintainersResponse.data.map(item => item.login).join(', '); - - uses: peternied/required-approval@v1.2 + - uses: peternied/required-approval@v1.3 with: token: ${{ secrets.GITHUB_TOKEN }} min-required: 1 From 71f1fabe149fd0777edf44502ace4a8f0911feeb Mon Sep 17 00:00:00 2001 From: Gagan Juneja Date: Tue, 9 Jan 2024 22:54:02 +0530 Subject: [PATCH 50/81] Add support for conditional Transient header propagation (#11490) * Clear transient header from system context Signed-off-by: Gagan Juneja * Clear transient header from system context 
Signed-off-by: Gagan Juneja * Adds changelog Signed-off-by: Gagan Juneja * Update CHANGELOG.md Co-authored-by: Andriy Redko Signed-off-by: Gagan Juneja * Adds unit tests Signed-off-by: Gagan Juneja * Refactor code Signed-off-by: Gagan Juneja * Refactor code Signed-off-by: Gagan Juneja * Refactor code Signed-off-by: Gagan Juneja * Supress warning Signed-off-by: Gagan Juneja * Refactor code Signed-off-by: Gagan Juneja --------- Signed-off-by: Gagan Juneja Signed-off-by: Gagan Juneja Co-authored-by: Gagan Juneja Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../common/util/concurrent/ThreadContext.java | 24 ++++--- .../ThreadContextStatePropagator.java | 30 ++++++++- .../TaskThreadContextStatePropagator.java | 13 ++++ ...hreadContextBasedTracerContextStorage.java | 19 +++++- .../transport/TransportService.java | 17 ++--- .../util/concurrent/ThreadContextTests.java | 67 +++++++++++++++++++ ...TaskThreadContextStatePropagatorTests.java | 34 ++++++++++ ...ContextBasedTracerContextStorageTests.java | 16 +++++ 9 files changed, 193 insertions(+), 28 deletions(-) create mode 100644 server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 45ab53b0d4246..6da048c65d8d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -214,6 +214,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) - Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) - Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) +- Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) ### Security diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 3da21a6777456..6580b0e0085ef 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -161,7 +161,7 @@ public StoredContext stashContext() { ); } - final Map transientHeaders = propagateTransients(context.transientHeaders); + final Map transientHeaders = propagateTransients(context.transientHeaders, context.isSystemContext); if (!transientHeaders.isEmpty()) { threadContextStruct = threadContextStruct.putTransient(transientHeaders); } @@ -182,7 +182,7 @@ public StoredContext stashContext() { public Writeable captureAsWriteable() { final ThreadContextStruct context = threadLocal.get(); return out -> { - final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + final Map propagatedHeaders = propagateHeaders(context.transientHeaders, context.isSystemContext); context.writeTo(out, defaultHeader, propagatedHeaders); }; } @@ -245,7 +245,7 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collectio final Map newTransientHeaders = new HashMap<>(originalContext.transientHeaders); boolean transientHeadersModified = false; - final Map transientHeaders = propagateTransients(originalContext.transientHeaders); + final Map transientHeaders = propagateTransients(originalContext.transientHeaders, originalContext.isSystemContext); if (!transientHeaders.isEmpty()) { 
newTransientHeaders.putAll(transientHeaders); transientHeadersModified = true; @@ -322,7 +322,7 @@ public Supplier wrapRestorable(StoredContext storedContext) { @Override public void writeTo(StreamOutput out) throws IOException { final ThreadContextStruct context = threadLocal.get(); - final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + final Map propagatedHeaders = propagateHeaders(context.transientHeaders, context.isSystemContext); context.writeTo(out, defaultHeader, propagatedHeaders); } @@ -534,7 +534,7 @@ boolean isDefaultContext() { * by the system itself rather than by a user action. */ public void markAsSystemContext() { - threadLocal.set(threadLocal.get().setSystemContext()); + threadLocal.set(threadLocal.get().setSystemContext(propagators)); } /** @@ -573,15 +573,15 @@ public static Map buildDefaultHeaders(Settings settings) { } } - private Map propagateTransients(Map source) { + private Map propagateTransients(Map source, boolean isSystemContext) { final Map transients = new HashMap<>(); - propagators.forEach(p -> transients.putAll(p.transients(source))); + propagators.forEach(p -> transients.putAll(p.transients(source, isSystemContext))); return transients; } - private Map propagateHeaders(Map source) { + private Map propagateHeaders(Map source, boolean isSystemContext) { final Map headers = new HashMap<>(); - propagators.forEach(p -> headers.putAll(p.headers(source))); + propagators.forEach(p -> headers.putAll(p.headers(source, isSystemContext))); return headers; } @@ -603,11 +603,13 @@ private static final class ThreadContextStruct { // saving current warning headers' size not to recalculate the size with every new warning header private final long warningHeadersSize; - private ThreadContextStruct setSystemContext() { + private ThreadContextStruct setSystemContext(final List propagators) { if (isSystemContext) { return this; } - return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, persistentHeaders, true); + final Map transients = new HashMap<>(); + propagators.forEach(p -> transients.putAll(p.transients(transientHeaders, true))); + return new ThreadContextStruct(requestHeaders, responseHeaders, transients, persistentHeaders, true); } private ThreadContextStruct( diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java index dac70b0e8124e..e8c12ae13d5eb 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java @@ -22,15 +22,41 @@ public interface ThreadContextStatePropagator { /** * Returns the list of transient headers that needs to be propagated from current context to new thread context. - * @param source current context transient headers + * + * @param source current context transient headers * @return the list of transient headers that needs to be propagated from current context to new thread context */ + @Deprecated(since = "2.12.0", forRemoval = true) Map transients(Map source); + /** + * Returns the list of transient headers that needs to be propagated from current context to new thread context. + * + * @param source current context transient headers + * @param isSystemContext if the propagation is for system context. 
+ * @return the list of transient headers that needs to be propagated from current context to new thread context + */ + default Map transients(Map source, boolean isSystemContext) { + return transients(source); + }; + /** * Returns the list of request headers that needs to be propagated from current context to request. - * @param source current context headers + * + * @param source current context headers * @return the list of request headers that needs to be propagated from current context to request */ + @Deprecated(since = "2.12.0", forRemoval = true) Map headers(Map source); + + /** + * Returns the list of request headers that needs to be propagated from current context to request. + * + * @param source current context headers + * @param isSystemContext if the propagation is for system context. + * @return the list of request headers that needs to be propagated from current context to request + */ + default Map headers(Map source, boolean isSystemContext) { + return headers(source); + } } diff --git a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java index ed111b34f048f..99559e45aaaee 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java +++ b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java @@ -20,7 +20,9 @@ * Propagates TASK_ID across thread contexts */ public class TaskThreadContextStatePropagator implements ThreadContextStatePropagator { + @Override + @SuppressWarnings("removal") public Map transients(Map source) { final Map transients = new HashMap<>(); @@ -32,7 +34,18 @@ public Map transients(Map source) { } @Override + public Map transients(Map source, boolean isSystemContext) { + return transients(source); + } + + @Override + @SuppressWarnings("removal") public Map headers(Map source) { return Collections.emptyMap(); } + + @Override + public Map headers(Map source, boolean isSystemContext) { + return headers(source); + } } diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java index 863f56d9fbe94..908164d1935a7 100644 --- a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -12,6 +12,7 @@ import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -50,20 +51,29 @@ public void put(String key, Span span) { } @Override + @SuppressWarnings("removal") public Map transients(Map source) { final Map transients = new HashMap<>(); - if (source.containsKey(CURRENT_SPAN)) { final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); if (current != null) { transients.put(CURRENT_SPAN, new SpanReference(current.getSpan())); } } - return transients; } @Override + public Map transients(Map source, boolean isSystemContext) { + if (isSystemContext == true) { + return Collections.emptyMap(); + } else { + return transients(source); + } + } + + @Override + @SuppressWarnings("removal") public Map headers(Map source) { final Map headers = new HashMap<>(); @@ -77,6 +87,11 @@ public Map headers(Map source) { return headers; } + 
@Override + public Map headers(Map source, boolean isSystemContext) { + return headers(source); + } + Span getCurrentSpan(String key) { SpanReference currentSpanRef = threadContext.getTransient(key); return (currentSpanRef == null) ? null : currentSpanRef.getSpan(); diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index a1697b1898eeb..d50266d8c9e4a 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -868,19 +868,10 @@ public final void sendRequest( final TransportRequestOptions options, final TransportResponseHandler handler ) { - if (connection == localNodeConnection) { - // See please https://github.com/opensearch-project/OpenSearch/issues/10291 - sendRequestAsync(connection, action, request, options, handler); - } else { - final Span span = tracer.startSpan(SpanBuilder.from(action, connection)); - try (SpanScope spanScope = tracer.withSpanInScope(span)) { - TransportResponseHandler traceableTransportResponseHandler = TraceableTransportResponseHandler.create( - handler, - span, - tracer - ); - sendRequestAsync(connection, action, request, options, traceableTransportResponseHandler); - } + final Span span = tracer.startSpan(SpanBuilder.from(action, connection)); + try (SpanScope spanScope = tracer.withSpanInScope(span)) { + TransportResponseHandler traceableTransportResponseHandler = TraceableTransportResponseHandler.create(handler, span, tracer); + sendRequestAsync(connection, action, request, options, traceableTransportResponseHandler); } } diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index a0531c76bf897..10669ca1a805b 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -44,6 +44,8 @@ import java.util.Map; import java.util.function.Supplier; +import org.mockito.Mockito; + import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasItem; @@ -740,6 +742,71 @@ public void testMarkAsSystemContext() throws IOException { assertFalse(threadContext.isSystemContext()); } + public void testSystemContextWithPropagator() { + Settings build = Settings.builder().put("request.headers.default", "1").build(); + Map transientHeaderMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map transientHeaderTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map headerMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map headerTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + ThreadContext threadContext = new ThreadContext(build); + ThreadContextStatePropagator mockPropagator = Mockito.mock(ThreadContextStatePropagator.class); + Mockito.when(mockPropagator.transients(transientHeaderMap, true)).thenReturn(Collections.emptyMap()); + Mockito.when(mockPropagator.transients(transientHeaderMap, false)).thenReturn(transientHeaderTransformedMap); + + Mockito.when(mockPropagator.headers(headerMap, true)).thenReturn(headerTransformedMap); + Mockito.when(mockPropagator.headers(headerMap, false)).thenReturn(headerTransformedMap); + 
threadContext.registerThreadContextStatePropagator(mockPropagator); + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("test_transient_propagation_key", 1); + assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); + assertEquals("bar", threadContext.getHeader("foo")); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + + public void testSerializeSystemContext() throws IOException { + Settings build = Settings.builder().put("request.headers.default", "1").build(); + Map transientHeaderMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map transientHeaderTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map headerMap = Collections.singletonMap("test_transient_propagation_key", "test"); + Map headerTransformedMap = Collections.singletonMap("test_transient_propagation_key", "test"); + ThreadContext threadContext = new ThreadContext(build); + ThreadContextStatePropagator mockPropagator = Mockito.mock(ThreadContextStatePropagator.class); + Mockito.when(mockPropagator.transients(transientHeaderMap, true)).thenReturn(Collections.emptyMap()); + Mockito.when(mockPropagator.transients(transientHeaderMap, false)).thenReturn(transientHeaderTransformedMap); + + Mockito.when(mockPropagator.headers(headerMap, true)).thenReturn(headerTransformedMap); + Mockito.when(mockPropagator.headers(headerMap, false)).thenReturn(headerTransformedMap); + threadContext.registerThreadContextStatePropagator(mockPropagator); + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("test_transient_propagation_key", "test"); + BytesStreamOutput out = new BytesStreamOutput(); + BytesStreamOutput outFromSystemContext = new BytesStreamOutput(); + threadContext.writeTo(out); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); + threadContext.markAsSystemContext(); + threadContext.writeTo(outFromSystemContext); + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("test_transient_propagation_key")); + threadContext.readHeaders(outFromSystemContext.bytes().streamInput()); + assertNull(threadContext.getHeader("test_transient_propagation_key")); + } + assertEquals("test", threadContext.getTransient("test_transient_propagation_key")); + threadContext.readHeaders(out.bytes().streamInput()); + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals("test", threadContext.getHeader("test_transient_propagation_key")); + assertEquals("1", threadContext.getHeader("default")); + } + public void testPutHeaders() { Settings build = Settings.builder().put("request.headers.default", "1").build(); ThreadContext threadContext = new ThreadContext(build); diff --git a/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java b/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java new file mode 100644 index 0000000000000..bfa0d566aabd7 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/tasks/TaskThreadContextStatePropagatorTests.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.tasks; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +public class TaskThreadContextStatePropagatorTests extends OpenSearchTestCase { + private final TaskThreadContextStatePropagator taskThreadContextStatePropagator = new TaskThreadContextStatePropagator(); + + public void testTransient() { + Map transientHeader = new HashMap<>(); + transientHeader.put(TASK_ID, "t_1"); + Map transientPropagatedHeader = taskThreadContextStatePropagator.transients(transientHeader, false); + assertEquals("t_1", transientPropagatedHeader.get(TASK_ID)); + } + + public void testTransientForSystemContext() { + Map transientHeader = new HashMap<>(); + transientHeader.put(TASK_ID, "t_1"); + Map transientPropagatedHeader = taskThreadContextStatePropagator.transients(transientHeader, true); + assertEquals("t_1", transientPropagatedHeader.get(TASK_ID)); + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java index ee816aa5f596d..bf11bcaf39a96 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorageTests.java @@ -252,4 +252,20 @@ public void run() { assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); } + + public void testSpanNotPropagatedToChildSystemThreadContext() { + final Span span = tracer.startSpan(SpanCreationContext.internal().name("test")); + + try (SpanScope scope = tracer.withSpanInScope(span)) { + try (StoredContext ignored = threadContext.stashContext()) { + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(span)); + threadContext.markAsSystemContext(); + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } + } + + assertThat(threadContext.getTransient(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(not(nullValue()))); + assertThat(threadContextStorage.get(ThreadContextBasedTracerContextStorage.CURRENT_SPAN), is(nullValue())); + } } From 6e2e72ba310409f7009e3801b1fb49c9823f5af1 Mon Sep 17 00:00:00 2001 From: Vikas Bansal <43470111+vikasvb90@users.noreply.github.com> Date: Tue, 9 Jan 2024 23:22:00 +0530 Subject: [PATCH 51/81] Fixed ingest pipeline script issue (#11725) Signed-off-by: vikasvb90 --- .../ingest/common/ScriptProcessor.java | 7 +- .../ingest/common/ScriptProcessorTests.java | 12 +++ .../test/ingest/190_script_processor.yml | 76 +++++++++++++++++++ .../rest-api-spec/test/ingest/90_simulate.yml | 45 +++++++++++ .../org/opensearch/ingest/IngestDocument.java | 2 + .../ingest/IngestDocumentTests.java | 6 ++ 6 files 
changed, 146 insertions(+), 2 deletions(-) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java index cc8889af27621..d1b4a0961b7bd 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java @@ -102,8 +102,11 @@ public IngestDocument execute(IngestDocument document) { } else { ingestScript = precompiledIngestScript; } - ingestScript.execute(document.getSourceAndMetadata()); - CollectionUtils.ensureNoSelfReferences(document.getSourceAndMetadata(), "ingest script"); + IngestDocument mutableDocument = new IngestDocument(document); + ingestScript.execute(mutableDocument.getSourceAndMetadata()); + CollectionUtils.ensureNoSelfReferences(mutableDocument.getSourceAndMetadata(), "ingest script"); + document.getSourceAndMetadata().clear(); + document.getSourceAndMetadata().putAll(mutableDocument.getSourceAndMetadata()); return document; } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java index 96d9be75c4ab7..e900458e361ce 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java @@ -105,4 +105,16 @@ private void assertIngestDocument(IngestDocument ingestDocument) { int bytesTotal = ingestDocument.getFieldValue("bytes_in", Integer.class) + ingestDocument.getFieldValue("bytes_out", Integer.class); assertThat(ingestDocument.getSourceAndMetadata().get("bytes_total"), is(bytesTotal)); } + + public void testScriptingWithSelfReferencingSourceMetadata() { + ScriptProcessor processor = new ScriptProcessor(randomAlphaOfLength(10), null, script, null, scriptService); + IngestDocument originalIngestDocument = randomDocument(); + String index = originalIngestDocument.getSourceAndMetadata().get(IngestDocument.Metadata.INDEX.getFieldName()).toString(); + String id = originalIngestDocument.getSourceAndMetadata().get(IngestDocument.Metadata.ID.getFieldName()).toString(); + Map sourceMetadata = originalIngestDocument.getSourceAndMetadata(); + originalIngestDocument.getSourceAndMetadata().put("_source", sourceMetadata); + IngestDocument ingestDocument = new IngestDocument(index, id, null, null, null, originalIngestDocument.getSourceAndMetadata()); + expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + } + } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml index 3230fb37b43f7..a66f02d6b6a6d 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/190_script_processor.yml @@ -202,3 +202,79 @@ teardown: id: 1 - match: { _source.source_field: "foo%20bar" } - match: { _source.target_field: "foo bar" } + +--- +"Test self referencing source with ignore failure": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "script" : { + "lang": "painless", + "source" : 
"ctx.foo['foo']=ctx.foo;ctx['test-field']='test-value'", + "ignore_failure": true + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.source_field)" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {source_field: "fooBar", foo: {foo: "bar"}} + + - do: + get: + index: test + id: 1 + - match: { _source.source_field: "fooBar" } + - match: { _source.target_field: "FOOBAR"} + - match: { _source.test-field: null} + +--- +"Test self referencing source without ignoring failure": + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "script" : { + "lang": "painless", + "source" : "ctx.foo['foo']=ctx.foo;ctx['test-field']='test-value'" + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.source_field)" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: bad_request + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: {source_field: "fooBar", foo: {foo: "bar"}} + - match: { error.root_cause.0.type: "illegal_argument_exception" } + - match: { error.root_cause.0.reason: "Iterable object is self-referencing itself (ingest script)" } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml index 7c073739f6a1f..edd649a310d42 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/90_simulate.yml @@ -1113,3 +1113,48 @@ teardown: - match: { status: 400 } - match: { error.root_cause.0.type: "illegal_argument_exception" } - match: { error.root_cause.0.reason: "Failed to parse parameter [_if_primary_term], only int or long is accepted" } + +--- +"Test simulate with pipeline with ignore failure and cyclic field assignments in script": + - do: + ingest.simulate: + verbose: true + body: > + { + "pipeline": { + "description": "_description", + "processors": [ + { + "script" : { + "ignore_failure" : true, + "lang": "painless", + "source": "ctx.foo['foo']=ctx.foo;ctx.tag='recursive'" + } + }, + { + "script" : { + "lang": "painless", + "source" : "ctx.target_field = Processors.uppercase(ctx.foo.foo)" + } + } + ] + }, + "docs": [ + { + "_source": { + "foo": { + "foo": "bar" + } + } + } + ] + } + - length: { docs: 1 } + - length: { docs.0.processor_results: 2 } + - match: { docs.0.processor_results.0.status: "error_ignored" } + - match: { docs.0.processor_results.0.ignored_error.error.type: "illegal_argument_exception" } + - match: { docs.0.processor_results.0.doc._source.tag: null } + - match: { docs.0.processor_results.1.doc._source.target_field: "BAR" } + - match: { docs.0.processor_results.1.doc._source.foo.foo: "bar" } + - match: { docs.0.processor_results.1.status: "success" } + - match: { docs.0.processor_results.1.processor_type: "script" } diff --git a/server/src/main/java/org/opensearch/ingest/IngestDocument.java b/server/src/main/java/org/opensearch/ingest/IngestDocument.java index e0de0a9488ad9..10e9e64db561e 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/opensearch/ingest/IngestDocument.java @@ -33,6 +33,7 @@ package org.opensearch.ingest; import org.opensearch.core.common.Strings; +import 
org.opensearch.core.common.util.CollectionUtils; import org.opensearch.index.VersionType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.IndexFieldMapper; @@ -752,6 +753,7 @@ public Map getSourceAndMetadata() { @SuppressWarnings("unchecked") public static Map deepCopyMap(Map source) { + CollectionUtils.ensureNoSelfReferences(source, "IngestDocument: Self reference present in object."); return (Map) deepCopy(source); } diff --git a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java index 8358dadf9cc3a..be035bc6ef7ea 100644 --- a/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/opensearch/ingest/IngestDocumentTests.java @@ -95,6 +95,12 @@ public void setTestIngestDocument() { ingestDocument = new IngestDocument("index", "id", null, null, null, document); } + public void testSelfReferencingSource() { + Map value = new HashMap<>(); + value.put("foo", value); + expectThrows(IllegalArgumentException.class, () -> IngestDocument.deepCopyMap(value)); + } + public void testSimpleGetFieldValue() { assertThat(ingestDocument.getFieldValue("foo", String.class), equalTo("bar")); assertThat(ingestDocument.getFieldValue("int", Integer.class), equalTo(123)); From ebda9639f4ca98a2181bdcb2c43f82224b1d2a34 Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Tue, 9 Jan 2024 10:57:52 -0800 Subject: [PATCH 52/81] [Tiered caching] Framework changes (#10753) * [Tiered caching] Framework changes Signed-off-by: Sagar Upadhyaya * Added javadoc for new files/packages Signed-off-by: Sagar Upadhyaya * Added changelog Signed-off-by: Sagar Upadhyaya * Fixing javadoc warnings Signed-off-by: Sagar Upadhyaya * Addressing comments Signed-off-by: Sagar Upadhyaya * Addressing additional minor comments Signed-off-by: Sagar Upadhyaya * Moving non null check to builder for OS onHeapCache Signed-off-by: Sagar Upadhyaya * Adding package-info for new packages Signed-off-by: Sagar Upadhyaya * Removing service and adding different cache interfaces along with event listener support Signed-off-by: Sagar Upadhyaya * Fixing gradle missingDoc issue Signed-off-by: Sagar Upadhyaya * Changing listener logic, removing tiered cache integration with IRC Signed-off-by: Sagar Upadhyaya * Adding opensearch.internal tag for LoadAwareCacheLoader Signed-off-by: Sagar Upadhyaya * Fixing thread safety issue Signed-off-by: Sagar Upadhyaya * Remove compute function and event listener logic change for TieredCache Signed-off-by: Sagar Upadhyaya * Making Cache.compute function private Signed-off-by: Sagar Upadhyaya * Adding javadoc and more test for cache.put Signed-off-by: Sagar Upadhyaya * Adding write locks to refresh API as well Signed-off-by: Sagar Upadhyaya * Removing unwanted EventType class and refactoring one UT Signed-off-by: Sagar Upadhyaya * Removing TieredCache interface Signed-off-by: Sagar Upadhyaya --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Sagar <99425694+sgup432@users.noreply.github.com> --- CHANGELOG.md | 2 + .../org/opensearch/common/cache/Cache.java | 112 +-- .../org/opensearch/common/cache/ICache.java | 34 + .../common/cache/LoadAwareCacheLoader.java | 20 + .../cache/store/OpenSearchOnHeapCache.java | 128 +++ .../common/cache/store/StoreAwareCache.java | 23 + .../StoreAwareCacheRemovalNotification.java | 33 + .../cache/store/StoreAwareCacheValue.java | 35 + .../builders/StoreAwareCacheBuilder.java | 73 ++ 
.../cache/store/builders/package-info.java | 12 + .../cache/store/enums/CacheStoreType.java | 20 + .../cache/store/enums/package-info.java | 10 + .../StoreAwareCacheEventListener.java | 30 + .../cache/store/listeners/package-info.java | 10 + .../common/cache/store/package-info.java | 10 + .../cache/tier/TieredSpilloverCache.java | 268 ++++++ .../common/cache/tier/package-info.java | 10 + .../indices/IndicesRequestCache.java | 6 +- .../cache/tier/TieredSpilloverCacheTests.java | 786 ++++++++++++++++++ 19 files changed, 1567 insertions(+), 55 deletions(-) create mode 100644 server/src/main/java/org/opensearch/common/cache/ICache.java create mode 100644 server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/cache/store/package-info.java create mode 100644 server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java create mode 100644 server/src/main/java/org/opensearch/common/cache/tier/package-info.java create mode 100644 server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 6da048c65d8d0..d23a439c35d08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753] + (https://github.com/opensearch-project/OpenSearch/pull/10753)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) diff --git a/server/src/main/java/org/opensearch/common/cache/Cache.java b/server/src/main/java/org/opensearch/common/cache/Cache.java index 30e7c014a2ec0..d8aa4e93735e6 100644 --- 
a/server/src/main/java/org/opensearch/common/cache/Cache.java +++ b/server/src/main/java/org/opensearch/common/cache/Cache.java @@ -424,68 +424,74 @@ public V computeIfAbsent(K key, CacheLoader loader) throws ExecutionExcept } }); if (value == null) { - // we need to synchronize loading of a value for a given key; however, holding the segment lock while - // invoking load can lead to deadlock against another thread due to dependent key loading; therefore, we - // need a mechanism to ensure that load is invoked at most once, but we are not invoking load while holding - // the segment lock; to do this, we atomically put a future in the map that can load the value, and then - // get the value from this future on the thread that won the race to place the future into the segment map - CacheSegment segment = getCacheSegment(key); - CompletableFuture> future; - CompletableFuture> completableFuture = new CompletableFuture<>(); + value = compute(key, loader); + } + return value; + } - try (ReleasableLock ignored = segment.writeLock.acquire()) { - future = segment.map.putIfAbsent(key, completableFuture); - } + private V compute(K key, CacheLoader loader) throws ExecutionException { + long now = now(); + // we need to synchronize loading of a value for a given key; however, holding the segment lock while + // invoking load can lead to deadlock against another thread due to dependent key loading; therefore, we + // need a mechanism to ensure that load is invoked at most once, but we are not invoking load while holding + // the segment lock; to do this, we atomically put a future in the map that can load the value, and then + // get the value from this future on the thread that won the race to place the future into the segment map + CacheSegment segment = getCacheSegment(key); + CompletableFuture> future; + CompletableFuture> completableFuture = new CompletableFuture<>(); - BiFunction, Throwable, ? extends V> handler = (ok, ex) -> { - if (ok != null) { - try (ReleasableLock ignored = lruLock.acquire()) { - promote(ok, now); - } - return ok.value; - } else { - try (ReleasableLock ignored = segment.writeLock.acquire()) { - CompletableFuture> sanity = segment.map.get(key); - if (sanity != null && sanity.isCompletedExceptionally()) { - segment.map.remove(key); - } - } - return null; - } - }; + try (ReleasableLock ignored = segment.writeLock.acquire()) { + future = segment.map.putIfAbsent(key, completableFuture); + } - CompletableFuture completableValue; - if (future == null) { - future = completableFuture; - completableValue = future.handle(handler); - V loaded; - try { - loaded = loader.load(key); - } catch (Exception e) { - future.completeExceptionally(e); - throw new ExecutionException(e); - } - if (loaded == null) { - NullPointerException npe = new NullPointerException("loader returned a null value"); - future.completeExceptionally(npe); - throw new ExecutionException(npe); - } else { - future.complete(new Entry<>(key, loaded, now)); + BiFunction, Throwable, ? 
extends V> handler = (ok, ex) -> { + if (ok != null) { + try (ReleasableLock ignored = lruLock.acquire()) { + promote(ok, now); } + return ok.value; } else { - completableValue = future.handle(handler); + try (ReleasableLock ignored = segment.writeLock.acquire()) { + CompletableFuture> sanity = segment.map.get(key); + if (sanity != null && sanity.isCompletedExceptionally()) { + segment.map.remove(key); + } + } + return null; } + }; + CompletableFuture completableValue; + if (future == null) { + future = completableFuture; + completableValue = future.handle(handler); + V loaded; try { - value = completableValue.get(); - // check to ensure the future hasn't been completed with an exception - if (future.isCompletedExceptionally()) { - future.get(); // call get to force the exception to be thrown for other concurrent callers - throw new IllegalStateException("the future was completed exceptionally but no exception was thrown"); - } - } catch (InterruptedException e) { - throw new IllegalStateException(e); + loaded = loader.load(key); + } catch (Exception e) { + future.completeExceptionally(e); + throw new ExecutionException(e); } + if (loaded == null) { + NullPointerException npe = new NullPointerException("loader returned a null value"); + future.completeExceptionally(npe); + throw new ExecutionException(npe); + } else { + future.complete(new Entry<>(key, loaded, now)); + } + } else { + completableValue = future.handle(handler); + } + V value; + try { + value = completableValue.get(); + // check to ensure the future hasn't been completed with an exception + if (future.isCompletedExceptionally()) { + future.get(); // call get to force the exception to be thrown for other concurrent callers + throw new IllegalStateException("the future was completed exceptionally but no exception was thrown"); + } + } catch (InterruptedException e) { + throw new IllegalStateException(e); } return value; } diff --git a/server/src/main/java/org/opensearch/common/cache/ICache.java b/server/src/main/java/org/opensearch/common/cache/ICache.java new file mode 100644 index 0000000000000..c6ea5fca1a8fe --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/ICache.java @@ -0,0 +1,34 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +/** + * Represents a cache interface. + * @param Type of key. + * @param Type of value. + * + * @opensearch.experimental + */ +public interface ICache { + V get(K key); + + void put(K key, V value); + + V computeIfAbsent(K key, LoadAwareCacheLoader loader) throws Exception; + + void invalidate(K key); + + void invalidateAll(); + + Iterable keys(); + + long count(); + + void refresh(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java new file mode 100644 index 0000000000000..57aa4aa39c782 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/LoadAwareCacheLoader.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache; + +/** + * Extends a cache loader with awareness of whether the data is loaded or not. + * @param Type of key. 
+ * @param Type of value. + * + * @opensearch.internal + */ +public interface LoadAwareCacheLoader extends CacheLoader { + boolean isLoaded(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java new file mode 100644 index 0000000000000..c497c8dbb7ea9 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -0,0 +1,128 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store; + +import org.opensearch.common.cache.Cache; +import org.opensearch.common.cache.CacheBuilder; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalListener; +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; +import org.opensearch.common.cache.store.enums.CacheStoreType; +import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; + +/** + * This variant of on-heap cache uses OpenSearch custom cache implementation. + * @param Type of key. + * @param Type of value. + * + * @opensearch.experimental + */ +public class OpenSearchOnHeapCache implements StoreAwareCache, RemovalListener { + + private final Cache cache; + + private final StoreAwareCacheEventListener eventListener; + + public OpenSearchOnHeapCache(Builder builder) { + CacheBuilder cacheBuilder = CacheBuilder.builder() + .setMaximumWeight(builder.getMaxWeightInBytes()) + .weigher(builder.getWeigher()) + .removalListener(this); + if (builder.getExpireAfterAcess() != null) { + cacheBuilder.setExpireAfterAccess(builder.getExpireAfterAcess()); + } + cache = cacheBuilder.build(); + this.eventListener = builder.getEventListener(); + } + + @Override + public V get(K key) { + V value = cache.get(key); + if (value != null) { + eventListener.onHit(key, value, CacheStoreType.ON_HEAP); + } else { + eventListener.onMiss(key, CacheStoreType.ON_HEAP); + } + return value; + } + + @Override + public void put(K key, V value) { + cache.put(key, value); + eventListener.onCached(key, value, CacheStoreType.ON_HEAP); + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader loader) throws Exception { + V value = cache.computeIfAbsent(key, key1 -> loader.load(key)); + if (!loader.isLoaded()) { + eventListener.onHit(key, value, CacheStoreType.ON_HEAP); + } else { + eventListener.onMiss(key, CacheStoreType.ON_HEAP); + eventListener.onCached(key, value, CacheStoreType.ON_HEAP); + } + return value; + } + + @Override + public void invalidate(K key) { + cache.invalidate(key); + } + + @Override + public void invalidateAll() { + cache.invalidateAll(); + } + + @Override + public Iterable keys() { + return cache.keys(); + } + + @Override + public long count() { + return cache.count(); + } + + @Override + public void refresh() { + cache.refresh(); + } + + @Override + public CacheStoreType getTierType() { + return CacheStoreType.ON_HEAP; + } + + @Override + public void onRemoval(RemovalNotification notification) { + eventListener.onRemoval( + new StoreAwareCacheRemovalNotification<>( + notification.getKey(), + notification.getValue(), + notification.getRemovalReason(), + CacheStoreType.ON_HEAP + ) + ); + } + + /** + * Builder object + * @param Type of key + * 
@param Type of value + */ + public static class Builder extends StoreAwareCacheBuilder { + + @Override + public StoreAwareCache build() { + return new OpenSearchOnHeapCache(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java new file mode 100644 index 0000000000000..45ca48d94c140 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCache.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store; + +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.store.enums.CacheStoreType; + +/** + * Represents a cache with a specific type of store like onHeap, disk etc. + * @param Type of key. + * @param Type of value. + * + * @opensearch.experimental + */ +public interface StoreAwareCache extends ICache { + CacheStoreType getTierType(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java new file mode 100644 index 0000000000000..492dbff3532a1 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheRemovalNotification.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store; + +import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.store.enums.CacheStoreType; + +/** + * Removal notification for store aware cache. + * @param Type of key. + * @param Type of value. + * + * @opensearch.internal + */ +public class StoreAwareCacheRemovalNotification extends RemovalNotification { + private final CacheStoreType cacheStoreType; + + public StoreAwareCacheRemovalNotification(K key, V value, RemovalReason removalReason, CacheStoreType cacheStoreType) { + super(key, value, removalReason); + this.cacheStoreType = cacheStoreType; + } + + public CacheStoreType getCacheStoreType() { + return cacheStoreType; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java new file mode 100644 index 0000000000000..4fbbbbfebfaa7 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/StoreAwareCacheValue.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store; + +import org.opensearch.common.cache.store.enums.CacheStoreType; + +/** + * Represents a store aware cache value. + * @param Type of value. 
+ * + * @opensearch.internal + */ +public class StoreAwareCacheValue { + private final V value; + private final CacheStoreType source; + + public StoreAwareCacheValue(V value, CacheStoreType source) { + this.value = value; + this.source = source; + } + + public V getValue() { + return value; + } + + public CacheStoreType getCacheStoreType() { + return source; + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java b/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java new file mode 100644 index 0000000000000..fc5aa48aae90f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/StoreAwareCacheBuilder.java @@ -0,0 +1,73 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.builders; + +import org.opensearch.common.cache.store.StoreAwareCache; +import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.unit.TimeValue; + +import java.util.function.ToLongBiFunction; + +/** + * Builder for store aware cache. + * @param Type of key. + * @param Type of value. + * + * @opensearch.internal + */ +public abstract class StoreAwareCacheBuilder { + + private long maxWeightInBytes; + + private ToLongBiFunction weigher; + + private TimeValue expireAfterAcess; + + private StoreAwareCacheEventListener eventListener; + + public StoreAwareCacheBuilder() {} + + public StoreAwareCacheBuilder setMaximumWeightInBytes(long sizeInBytes) { + this.maxWeightInBytes = sizeInBytes; + return this; + } + + public StoreAwareCacheBuilder setWeigher(ToLongBiFunction weigher) { + this.weigher = weigher; + return this; + } + + public StoreAwareCacheBuilder setExpireAfterAccess(TimeValue expireAfterAcess) { + this.expireAfterAcess = expireAfterAcess; + return this; + } + + public StoreAwareCacheBuilder setEventListener(StoreAwareCacheEventListener eventListener) { + this.eventListener = eventListener; + return this; + } + + public long getMaxWeightInBytes() { + return maxWeightInBytes; + } + + public TimeValue getExpireAfterAcess() { + return expireAfterAcess; + } + + public ToLongBiFunction getWeigher() { + return weigher; + } + + public StoreAwareCacheEventListener getEventListener() { + return eventListener; + } + + public abstract StoreAwareCache build(); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java new file mode 100644 index 0000000000000..ac4590ae3bff7 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/builders/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Base package for builders. 
+ */ +package org.opensearch.common.cache.store.builders; diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java b/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java new file mode 100644 index 0000000000000..04c0825787b66 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/enums/CacheStoreType.java @@ -0,0 +1,20 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.enums; + +/** + * Cache store types in tiered cache. + * + * @opensearch.internal + */ +public enum CacheStoreType { + + ON_HEAP, + DISK; +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java new file mode 100644 index 0000000000000..7a4e0fa7201fd --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/enums/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Package related to tiered cache enums */ +package org.opensearch.common.cache.store.enums; diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java b/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java new file mode 100644 index 0000000000000..6d7e4b39aaf9f --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/listeners/StoreAwareCacheEventListener.java @@ -0,0 +1,30 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.store.listeners; + +import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; +import org.opensearch.common.cache.store.enums.CacheStoreType; + +/** + * This can be used to listen to tiered caching events + * @param Type of key + * @param Type of value + * + * @opensearch.internal + */ +public interface StoreAwareCacheEventListener { + + void onMiss(K key, CacheStoreType cacheStoreType); + + void onRemoval(StoreAwareCacheRemovalNotification notification); + + void onHit(K key, V value, CacheStoreType cacheStoreType); + + void onCached(K key, V value, CacheStoreType cacheStoreType); +} diff --git a/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java new file mode 100644 index 0000000000000..c3222ca3ffb62 --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/listeners/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Package related to tiered cache listeners */ +package org.opensearch.common.cache.store.listeners; diff --git a/server/src/main/java/org/opensearch/common/cache/store/package-info.java b/server/src/main/java/org/opensearch/common/cache/store/package-info.java new file mode 100644 index 0000000000000..edc1ecd7d5e7a --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/store/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for store aware caches. */ +package org.opensearch.common.cache.store; diff --git a/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java b/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java new file mode 100644 index 0000000000000..8b432c9484aed --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/tier/TieredSpilloverCache.java @@ -0,0 +1,268 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.cache.tier; + +import org.opensearch.common.cache.ICache; +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.store.StoreAwareCache; +import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; +import org.opensearch.common.cache.store.StoreAwareCacheValue; +import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; +import org.opensearch.common.cache.store.enums.CacheStoreType; +import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.common.util.iterable.Iterables; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Function; + +/** + * This cache spillover the evicted items from heap tier to disk tier. All the new items are first cached on heap + * and the items evicted from on heap cache are moved to disk based cache. If disk based cache also gets full, + * then items are eventually evicted from it and removed which will result in cache miss. + * + * @param Type of key + * @param Type of value + * + * @opensearch.experimental + */ +public class TieredSpilloverCache implements ICache, StoreAwareCacheEventListener { + + // TODO: Remove optional when diskCache implementation is integrated. + private final Optional> onDiskCache; + private final StoreAwareCache onHeapCache; + private final StoreAwareCacheEventListener listener; + ReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + ReleasableLock readLock = new ReleasableLock(readWriteLock.readLock()); + ReleasableLock writeLock = new ReleasableLock(readWriteLock.writeLock()); + + /** + * Maintains caching tiers in ascending order of cache latency. 
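+ * The on-heap tier is always checked first; the disk tier, when configured, is consulted only after an on-heap miss.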
+ */ + private final List> cacheList; + + TieredSpilloverCache(Builder builder) { + Objects.requireNonNull(builder.onHeapCacheBuilder, "onHeap cache builder can't be null"); + this.onHeapCache = builder.onHeapCacheBuilder.setEventListener(this).build(); + if (builder.onDiskCacheBuilder != null) { + this.onDiskCache = Optional.of(builder.onDiskCacheBuilder.setEventListener(this).build()); + } else { + this.onDiskCache = Optional.empty(); + } + this.listener = builder.listener; + this.cacheList = this.onDiskCache.map(diskTier -> Arrays.asList(this.onHeapCache, diskTier)).orElse(List.of(this.onHeapCache)); + } + + // Package private for testing + StoreAwareCache getOnHeapCache() { + return onHeapCache; + } + + // Package private for testing + Optional> getOnDiskCache() { + return onDiskCache; + } + + @Override + public V get(K key) { + StoreAwareCacheValue cacheValue = getValueFromTieredCache(true).apply(key); + if (cacheValue == null) { + return null; + } + return cacheValue.getValue(); + } + + @Override + public void put(K key, V value) { + try (ReleasableLock ignore = writeLock.acquire()) { + onHeapCache.put(key, value); + listener.onCached(key, value, CacheStoreType.ON_HEAP); + } + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader loader) throws Exception { + // We are skipping calling event listeners at this step as we do another get inside below computeIfAbsent. + // Where we might end up calling onMiss twice for a key not present in onHeap cache. + // Similarly we might end up calling both onMiss and onHit for a key, in case we are receiving concurrent + // requests for the same key which requires loading only once. + StoreAwareCacheValue cacheValue = getValueFromTieredCache(false).apply(key); + if (cacheValue == null) { + // Add the value to the onHeap cache. We are calling computeIfAbsent which does another get inside. + // This is needed as there can be many requests for the same key at the same time and we only want to load + // the value once. + V value = null; + try (ReleasableLock ignore = writeLock.acquire()) { + value = onHeapCache.computeIfAbsent(key, loader); + } + if (loader.isLoaded()) { + listener.onMiss(key, CacheStoreType.ON_HEAP); + onDiskCache.ifPresent(diskTier -> listener.onMiss(key, CacheStoreType.DISK)); + listener.onCached(key, value, CacheStoreType.ON_HEAP); + } else { + listener.onHit(key, value, CacheStoreType.ON_HEAP); + } + return value; + } + listener.onHit(key, cacheValue.getValue(), cacheValue.getCacheStoreType()); + if (cacheValue.getCacheStoreType().equals(CacheStoreType.DISK)) { + listener.onMiss(key, CacheStoreType.ON_HEAP); + } + return cacheValue.getValue(); + } + + @Override + public void invalidate(K key) { + // We are trying to invalidate the key from all caches though it would be present in only one of them. + // Doing this as we don't know where it is located. We could do a get from both and check that, but that would + // also trigger a hit/miss listener event, so ignoring it for now. + try (ReleasableLock ignore = writeLock.acquire()) { + for (StoreAwareCache storeAwareCache : cacheList) { + storeAwareCache.invalidate(key); + } + } + } + + @Override + public void invalidateAll() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (StoreAwareCache storeAwareCache : cacheList) { + storeAwareCache.invalidateAll(); + } + } + } + + /** + * Provides an iteration over both onHeap and disk keys. This is not protected from any mutations to the cache.
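+ * An entry that spills from the on-heap tier to the disk tier mid-iteration may, for example, be returned twice.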
+ * @return An iterable over (onHeap + disk) keys + */ + @Override + public Iterable keys() { + Iterable onDiskKeysIterable; + if (onDiskCache.isPresent()) { + onDiskKeysIterable = onDiskCache.get().keys(); + } else { + onDiskKeysIterable = Collections::emptyIterator; + } + return Iterables.concat(onHeapCache.keys(), onDiskKeysIterable); + } + + @Override + public long count() { + long totalCount = 0; + for (StoreAwareCache storeAwareCache : cacheList) { + totalCount += storeAwareCache.count(); + } + return totalCount; + } + + @Override + public void refresh() { + try (ReleasableLock ignore = writeLock.acquire()) { + for (StoreAwareCache storeAwareCache : cacheList) { + storeAwareCache.refresh(); + } + } + } + + @Override + public void onMiss(K key, CacheStoreType cacheStoreType) { + // Misses for tiered cache are tracked here itself. + } + + @Override + public void onRemoval(StoreAwareCacheRemovalNotification notification) { + if (RemovalReason.EVICTED.equals(notification.getRemovalReason()) + || RemovalReason.CAPACITY.equals(notification.getRemovalReason())) { + switch (notification.getCacheStoreType()) { + case ON_HEAP: + try (ReleasableLock ignore = writeLock.acquire()) { + onDiskCache.ifPresent(diskTier -> { diskTier.put(notification.getKey(), notification.getValue()); }); + } + onDiskCache.ifPresent( + diskTier -> listener.onCached(notification.getKey(), notification.getValue(), CacheStoreType.DISK) + ); + break; + default: + break; + } + } + listener.onRemoval(notification); + } + + @Override + public void onHit(K key, V value, CacheStoreType cacheStoreType) { + // Hits for tiered cache are tracked here itself. + } + + @Override + public void onCached(K key, V value, CacheStoreType cacheStoreType) { + // onCached events for tiered cache are tracked here itself. + } + + private Function> getValueFromTieredCache(boolean triggerEventListener) { + return key -> { + try (ReleasableLock ignore = readLock.acquire()) { + for (StoreAwareCache storeAwareCache : cacheList) { + V value = storeAwareCache.get(key); + if (value != null) { + if (triggerEventListener) { + listener.onHit(key, value, storeAwareCache.getTierType()); + } + return new StoreAwareCacheValue<>(value, storeAwareCache.getTierType()); + } else { + if (triggerEventListener) { + listener.onMiss(key, storeAwareCache.getTierType()); + } + } + } + } + return null; + }; + } + + /** + * Builder object for tiered spillover cache. 
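+ * The disk tier is optional; if no on-disk cache builder is set, only the on-heap tier is created.
+ * A rough construction sketch (the String types, weight, weigher and the {@code listener} variable below are
+ * illustrative placeholders, not values taken from this change):
+ *   TieredSpilloverCache<String, String> cache = new TieredSpilloverCache.Builder<String, String>()
+ *       .setOnHeapCacheBuilder(new OpenSearchOnHeapCache.Builder<String, String>().setMaximumWeightInBytes(1000).setWeigher((k, v) -> 10))
+ *       .setListener(listener)
+ *       .build();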
+ * @param Type of key + * @param Type of value + */ + public static class Builder { + private StoreAwareCacheBuilder onHeapCacheBuilder; + private StoreAwareCacheBuilder onDiskCacheBuilder; + private StoreAwareCacheEventListener listener; + + public Builder() {} + + public Builder setOnHeapCacheBuilder(StoreAwareCacheBuilder onHeapCacheBuilder) { + this.onHeapCacheBuilder = onHeapCacheBuilder; + return this; + } + + public Builder setOnDiskCacheBuilder(StoreAwareCacheBuilder onDiskCacheBuilder) { + this.onDiskCacheBuilder = onDiskCacheBuilder; + return this; + } + + public Builder setListener(StoreAwareCacheEventListener listener) { + this.listener = listener; + return this; + } + + public TieredSpilloverCache build() { + return new TieredSpilloverCache<>(this); + } + } +} diff --git a/server/src/main/java/org/opensearch/common/cache/tier/package-info.java b/server/src/main/java/org/opensearch/common/cache/tier/package-info.java new file mode 100644 index 0000000000000..7ad81dbe3073c --- /dev/null +++ b/server/src/main/java/org/opensearch/common/cache/tier/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Base package for cache tier support. */ +package org.opensearch.common.cache.tier; diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 629cea102a8b2..4a19f8eb8714d 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -247,7 +247,7 @@ interface CacheEntity extends Accountable { * * @opensearch.internal */ - static class Key implements Accountable { + public static class Key implements Accountable { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); public final CacheEntity entity; // use as identity equality @@ -328,6 +328,9 @@ public int hashCode() { } } + /** + * Logic to clean up in-memory cache. + */ synchronized void cleanCache() { final Set currentKeysToClean = new HashSet<>(); final Set currentFullClean = new HashSet<>(); @@ -355,7 +358,6 @@ synchronized void cleanCache() { } } } - cache.refresh(); } diff --git a/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java b/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java new file mode 100644 index 0000000000000..eb75244c6f8b1 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/cache/tier/TieredSpilloverCacheTests.java @@ -0,0 +1,786 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.cache.tier; + +import org.opensearch.common.cache.LoadAwareCacheLoader; +import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.StoreAwareCache; +import org.opensearch.common.cache.store.StoreAwareCacheRemovalNotification; +import org.opensearch.common.cache.store.builders.StoreAwareCacheBuilder; +import org.opensearch.common.cache.store.enums.CacheStoreType; +import org.opensearch.common.cache.store.listeners.StoreAwareCacheEventListener; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Phaser; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public class TieredSpilloverCacheTests extends OpenSearchTestCase { + + public void testComputeIfAbsentWithoutAnyOnHeapCacheEviction() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + MockCacheEventListener eventListener = new MockCacheEventListener(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + randomIntBetween(1, 4), + eventListener, + 0 + ); + int numOfItems1 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + List keys = new ArrayList<>(); + // Put values in cache. + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + keys.add(key); + LoadAwareCacheLoader tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + + // Try to hit cache again with some randomization. + int numOfItems2 = randomIntBetween(1, onHeapCacheSize / 2 - 1); + int cacheHit = 0; + int cacheMiss = 0; + for (int iter = 0; iter < numOfItems2; iter++) { + if (randomBoolean()) { + // Hit cache with stored key + cacheHit++; + int index = randomIntBetween(0, keys.size() - 1); + tieredSpilloverCache.computeIfAbsent(keys.get(index), getLoadAwareCacheLoader()); + } else { + // Hit cache with randomized key which is expected to miss cache always. 
+ tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), getLoadAwareCacheLoader()); + cacheMiss++; + } + } + assertEquals(cacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); + assertEquals(numOfItems1 + cacheMiss, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + } + + public void testComputeIfAbsentWithEvictionsFromOnHeapCache() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + MockCacheEventListener eventListener = new MockCacheEventListener(); + StoreAwareCacheBuilder cacheBuilder = new OpenSearchOnHeapCache.Builder().setMaximumWeightInBytes( + onHeapCacheSize * 50 + ).setWeigher((k, v) -> 50); // Will support onHeapCacheSize entries. + + StoreAwareCacheBuilder diskCacheBuilder = new MockOnDiskCache.Builder().setMaxSize(diskCacheSize) + .setDeliberateDelay(0); + + TieredSpilloverCache tieredSpilloverCache = new TieredSpilloverCache.Builder() + .setOnHeapCacheBuilder(cacheBuilder) + .setOnDiskCacheBuilder(diskCacheBuilder) + .setListener(eventListener) + .build(); + + // Put values in cache more than it's size and cause evictions from onHeap. + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List onHeapKeys = new ArrayList<>(); + List diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + LoadAwareCacheLoader tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + long actualDiskCacheSize = tieredSpilloverCache.getOnDiskCache().get().count(); + assertEquals(numOfItems1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); + assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + + assertEquals( + eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count(), + eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count() + ); + assertEquals(actualDiskCacheSize, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); + + tieredSpilloverCache.getOnHeapCache().keys().forEach(onHeapKeys::add); + tieredSpilloverCache.getOnDiskCache().get().keys().forEach(diskTierKeys::add); + + assertEquals(tieredSpilloverCache.getOnHeapCache().count(), onHeapKeys.size()); + assertEquals(tieredSpilloverCache.getOnDiskCache().get().count(), diskTierKeys.size()); + + // Try to hit cache again with some randomization. + int numOfItems2 = randomIntBetween(50, 200); + int onHeapCacheHit = 0; + int diskCacheHit = 0; + int cacheMiss = 0; + for (int iter = 0; iter < numOfItems2; iter++) { + if (randomBoolean()) { // Hit cache with key stored in onHeap cache. + onHeapCacheHit++; + int index = randomIntBetween(0, onHeapKeys.size() - 1); + LoadAwareCacheLoader loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(onHeapKeys.get(index), loadAwareCacheLoader); + assertFalse(loadAwareCacheLoader.isLoaded()); + } else { // Hit cache with key stored in disk cache. 
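+ // (A hit served from the disk tier is also counted as an on-heap miss, which the totals asserted further below rely on.)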
+ diskCacheHit++; + int index = randomIntBetween(0, diskTierKeys.size() - 1); + LoadAwareCacheLoader loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(diskTierKeys.get(index), loadAwareCacheLoader); + assertFalse(loadAwareCacheLoader.isLoaded()); + } + } + for (int iter = 0; iter < randomIntBetween(50, 200); iter++) { + // Hit cache with randomized key which is expected to miss cache always. + LoadAwareCacheLoader tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + cacheMiss++; + } + // On heap cache misses would also include diskCacheHits as it means it missed onHeap cache. + assertEquals(numOfItems1 + cacheMiss + diskCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).missCount.count()); + assertEquals(onHeapCacheHit, eventListener.enumMap.get(CacheStoreType.ON_HEAP).hitCount.count()); + assertEquals(cacheMiss + numOfItems1, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); + assertEquals(diskCacheHit, eventListener.enumMap.get(CacheStoreType.DISK).hitCount.count()); + } + + public void testComputeIfAbsentWithEvictionsFromBothTier() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheEventListener eventListener = new MockCacheEventListener(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + + int numOfItems = randomIntBetween(totalSize + 1, totalSize * 3); + for (int iter = 0; iter < numOfItems; iter++) { + LoadAwareCacheLoader tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), tieredCacheLoader); + } + assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); + assertTrue(eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count() > 0); + } + + public void testGetAndCount() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheEventListener eventListener = new MockCacheEventListener(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List onHeapKeys = new ArrayList<>(); + List diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + if (iter > (onHeapCacheSize - 1)) { + // All these are bound to go to disk based cache. 
+ diskTierKeys.add(key); + } else { + onHeapKeys.add(key); + } + LoadAwareCacheLoader loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } + + for (int iter = 0; iter < numOfItems1; iter++) { + if (randomBoolean()) { + if (randomBoolean()) { + int index = randomIntBetween(0, onHeapKeys.size() - 1); + assertNotNull(tieredSpilloverCache.get(onHeapKeys.get(index))); + } else { + int index = randomIntBetween(0, diskTierKeys.size() - 1); + assertNotNull(tieredSpilloverCache.get(diskTierKeys.get(index))); + } + } else { + assertNull(tieredSpilloverCache.get(UUID.randomUUID().toString())); + } + } + assertEquals(numOfItems1, tieredSpilloverCache.count()); + } + + public void testWithDiskTierNull() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + MockCacheEventListener eventListener = new MockCacheEventListener(); + + StoreAwareCacheBuilder onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder() + .setMaximumWeightInBytes(onHeapCacheSize * 20) + .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries + TieredSpilloverCache tieredSpilloverCache = new TieredSpilloverCache.Builder() + .setOnHeapCacheBuilder(onHeapCacheBuilder) + .setListener(eventListener) + .build(); + + int numOfItems = randomIntBetween(onHeapCacheSize + 1, onHeapCacheSize * 3); + for (int iter = 0; iter < numOfItems; iter++) { + LoadAwareCacheLoader loadAwareCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), loadAwareCacheLoader); + } + assertTrue(eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count() > 0); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).cachedCount.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).evictionsMetric.count()); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.DISK).missCount.count()); + } + + public void testPut() { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(onHeapCacheSize + 1, 100); + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + String key = UUID.randomUUID().toString(); + String value = UUID.randomUUID().toString(); + tieredSpilloverCache.put(key, value); + assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).cachedCount.count()); + assertEquals(1, tieredSpilloverCache.count()); + } + + public void testPutAndVerifyNewItemsArePresentOnHeapCache() throws Exception { + int onHeapCacheSize = randomIntBetween(200, 400); + int diskCacheSize = randomIntBetween(450, 800); + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + + for (int i = 0; i < onHeapCacheSize; i++) { + tieredSpilloverCache.computeIfAbsent(UUID.randomUUID().toString(), new LoadAwareCacheLoader<>() { + @Override + public boolean isLoaded() { + return false; + } + + @Override + public String load(String key) throws Exception { + return UUID.randomUUID().toString(); + } + }); + } + + assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); + assertEquals(0, tieredSpilloverCache.getOnDiskCache().get().count()); + + // Again try to put OnHeap cache capacity amount of new items. 
+ List newKeyList = new ArrayList<>(); + for (int i = 0; i < onHeapCacheSize; i++) { + newKeyList.add(UUID.randomUUID().toString()); + } + + for (int i = 0; i < newKeyList.size(); i++) { + tieredSpilloverCache.computeIfAbsent(newKeyList.get(i), new LoadAwareCacheLoader<>() { + @Override + public boolean isLoaded() { + return false; + } + + @Override + public String load(String key) { + return UUID.randomUUID().toString(); + } + }); + } + + // Verify that new items are part of onHeap cache. + List actualOnHeapCacheKeys = new ArrayList<>(); + tieredSpilloverCache.getOnHeapCache().keys().forEach(actualOnHeapCacheKeys::add); + + assertEquals(newKeyList.size(), actualOnHeapCacheKeys.size()); + for (int i = 0; i < actualOnHeapCacheKeys.size(); i++) { + assertTrue(newKeyList.contains(actualOnHeapCacheKeys.get(i))); + } + + assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnHeapCache().count()); + assertEquals(onHeapCacheSize, tieredSpilloverCache.getOnDiskCache().get().count()); + } + + public void testInvalidate() { + int onHeapCacheSize = 1; + int diskCacheSize = 10; + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + String key = UUID.randomUUID().toString(); + String value = UUID.randomUUID().toString(); + // First try to invalidate without the key present in cache. + tieredSpilloverCache.invalidate(key); + assertEquals(0, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); + + // Now try to invalidate with the key present in onHeap cache. + tieredSpilloverCache.put(key, value); + tieredSpilloverCache.invalidate(key); + assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).invalidationMetric.count()); + assertEquals(0, tieredSpilloverCache.count()); + + tieredSpilloverCache.put(key, value); + // Put another key/value so that one of the item is evicted to disk cache. + String key2 = UUID.randomUUID().toString(); + tieredSpilloverCache.put(key2, UUID.randomUUID().toString()); + assertEquals(2, tieredSpilloverCache.count()); + // Again invalidate older key + tieredSpilloverCache.invalidate(key); + assertEquals(1, eventListener.enumMap.get(CacheStoreType.DISK).invalidationMetric.count()); + assertEquals(1, tieredSpilloverCache.count()); + } + + public void testCacheKeys() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + List onHeapKeys = new ArrayList<>(); + List diskTierKeys = new ArrayList<>(); + // During first round add onHeapCacheSize entries. Will go to onHeap cache initially. + for (int i = 0; i < onHeapCacheSize; i++) { + String key = UUID.randomUUID().toString(); + diskTierKeys.add(key); + tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader()); + } + // In another round, add another onHeapCacheSize entries. These will go to onHeap and above ones will be + // evicted to onDisk cache. 
+ for (int i = 0; i < onHeapCacheSize; i++) { + String key = UUID.randomUUID().toString(); + onHeapKeys.add(key); + tieredSpilloverCache.computeIfAbsent(key, getLoadAwareCacheLoader()); + } + + List actualOnHeapKeys = new ArrayList<>(); + List actualOnDiskKeys = new ArrayList<>(); + Iterable onHeapiterable = tieredSpilloverCache.getOnHeapCache().keys(); + Iterable onDiskiterable = tieredSpilloverCache.getOnDiskCache().get().keys(); + onHeapiterable.iterator().forEachRemaining(actualOnHeapKeys::add); + onDiskiterable.iterator().forEachRemaining(actualOnDiskKeys::add); + for (String onHeapKey : onHeapKeys) { + assertTrue(actualOnHeapKeys.contains(onHeapKey)); + } + for (String onDiskKey : actualOnDiskKeys) { + assertTrue(actualOnDiskKeys.contains(onDiskKey)); + } + + // Testing keys() which returns all keys. + List actualMergedKeys = new ArrayList<>(); + List expectedMergedKeys = new ArrayList<>(); + expectedMergedKeys.addAll(onHeapKeys); + expectedMergedKeys.addAll(diskTierKeys); + + Iterable mergedIterable = tieredSpilloverCache.keys(); + mergedIterable.iterator().forEachRemaining(actualMergedKeys::add); + + assertEquals(expectedMergedKeys.size(), actualMergedKeys.size()); + for (String key : expectedMergedKeys) { + assertTrue(actualMergedKeys.contains(key)); + } + } + + public void testRefresh() { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + tieredSpilloverCache.refresh(); + } + + public void testInvalidateAll() throws Exception { + int onHeapCacheSize = randomIntBetween(10, 30); + int diskCacheSize = randomIntBetween(60, 100); + int totalSize = onHeapCacheSize + diskCacheSize; + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + // Put values in cache more than it's size and cause evictions from onHeap. + int numOfItems1 = randomIntBetween(onHeapCacheSize + 1, totalSize); + List onHeapKeys = new ArrayList<>(); + List diskTierKeys = new ArrayList<>(); + for (int iter = 0; iter < numOfItems1; iter++) { + String key = UUID.randomUUID().toString(); + if (iter > (onHeapCacheSize - 1)) { + // All these are bound to go to disk based cache. 
+ diskTierKeys.add(key); + } else { + onHeapKeys.add(key); + } + LoadAwareCacheLoader tieredCacheLoader = getLoadAwareCacheLoader(); + tieredSpilloverCache.computeIfAbsent(key, tieredCacheLoader); + } + assertEquals(numOfItems1, tieredSpilloverCache.count()); + tieredSpilloverCache.invalidateAll(); + assertEquals(0, tieredSpilloverCache.count()); + } + + public void testComputeIfAbsentConcurrently() throws Exception { + int onHeapCacheSize = randomIntBetween(100, 300); + int diskCacheSize = randomIntBetween(200, 400); + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + + TieredSpilloverCache tieredSpilloverCache = intializeTieredSpilloverCache( + onHeapCacheSize, + diskCacheSize, + eventListener, + 0 + ); + + int numberOfSameKeys = randomIntBetween(10, onHeapCacheSize - 1); + String key = UUID.randomUUID().toString(); + String value = UUID.randomUUID().toString(); + + Thread[] threads = new Thread[numberOfSameKeys]; + Phaser phaser = new Phaser(numberOfSameKeys + 1); + CountDownLatch countDownLatch = new CountDownLatch(numberOfSameKeys); // To wait for all threads to finish. + + List> loadAwareCacheLoaderList = new CopyOnWriteArrayList<>(); + + for (int i = 0; i < numberOfSameKeys; i++) { + threads[i] = new Thread(() -> { + try { + LoadAwareCacheLoader loadAwareCacheLoader = new LoadAwareCacheLoader() { + boolean isLoaded = false; + + @Override + public boolean isLoaded() { + return isLoaded; + } + + @Override + public Object load(Object key) throws Exception { + isLoaded = true; + return value; + } + }; + loadAwareCacheLoaderList.add(loadAwareCacheLoader); + phaser.arriveAndAwaitAdvance(); + tieredSpilloverCache.computeIfAbsent(key, loadAwareCacheLoader); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + threads[i].start(); + } + phaser.arriveAndAwaitAdvance(); + countDownLatch.await(); // Wait for rest of tasks to be cancelled. + int numberOfTimesKeyLoaded = 0; + assertEquals(numberOfSameKeys, loadAwareCacheLoaderList.size()); + for (int i = 0; i < loadAwareCacheLoaderList.size(); i++) { + LoadAwareCacheLoader loader = loadAwareCacheLoaderList.get(i); + if (loader.isLoaded()) { + numberOfTimesKeyLoaded++; + } + } + assertEquals(1, numberOfTimesKeyLoaded); // It should be loaded only once. + } + + public void testConcurrencyForEvictionFlow() throws Exception { + int diskCacheSize = randomIntBetween(450, 800); + + MockCacheEventListener eventListener = new MockCacheEventListener<>(); + + StoreAwareCacheBuilder cacheBuilder = new OpenSearchOnHeapCache.Builder().setMaximumWeightInBytes( + 200 + ).setWeigher((k, v) -> 150); + + StoreAwareCacheBuilder diskCacheBuilder = new MockOnDiskCache.Builder().setMaxSize(diskCacheSize) + .setDeliberateDelay(500); + + TieredSpilloverCache tieredSpilloverCache = new TieredSpilloverCache.Builder() + .setOnHeapCacheBuilder(cacheBuilder) + .setOnDiskCacheBuilder(diskCacheBuilder) + .setListener(eventListener) + .build(); + + String keyToBeEvicted = "key1"; + String secondKey = "key2"; + + // Put first key on tiered cache. Will go into onHeap cache. + tieredSpilloverCache.computeIfAbsent(keyToBeEvicted, new LoadAwareCacheLoader<>() { + @Override + public boolean isLoaded() { + return false; + } + + @Override + public String load(String key) { + return UUID.randomUUID().toString(); + } + }); + CountDownLatch countDownLatch = new CountDownLatch(1); + CountDownLatch countDownLatch1 = new CountDownLatch(1); + // Put second key on tiered cache. 
Will cause eviction of first key from onHeap cache and should go into + // disk cache. + LoadAwareCacheLoader loadAwareCacheLoader = getLoadAwareCacheLoader(); + Thread thread = new Thread(() -> { + try { + tieredSpilloverCache.computeIfAbsent(secondKey, loadAwareCacheLoader); + countDownLatch1.countDown(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + thread.start(); + assertBusy(() -> { assertTrue(loadAwareCacheLoader.isLoaded()); }, 100, TimeUnit.MILLISECONDS); // We wait for new key to be loaded + // after which it eviction flow is + // guaranteed to occur. + StoreAwareCache onDiskCache = tieredSpilloverCache.getOnDiskCache().get(); + + // Now on a different thread, try to get key(above one which got evicted) from tiered cache. We expect this + // should return not null value as it should be present on diskCache. + AtomicReference actualValue = new AtomicReference<>(); + Thread thread1 = new Thread(() -> { + try { + actualValue.set(tieredSpilloverCache.get(keyToBeEvicted)); + } catch (Exception e) { + throw new RuntimeException(e); + } + countDownLatch.countDown(); + }); + thread1.start(); + countDownLatch.await(); + assertNotNull(actualValue.get()); + countDownLatch1.await(); + assertEquals(1, eventListener.enumMap.get(CacheStoreType.ON_HEAP).evictionsMetric.count()); + assertEquals(1, tieredSpilloverCache.getOnHeapCache().count()); + assertEquals(1, onDiskCache.count()); + assertNotNull(onDiskCache.get(keyToBeEvicted)); + } + + class MockCacheEventListener implements StoreAwareCacheEventListener { + + EnumMap enumMap = new EnumMap<>(CacheStoreType.class); + + MockCacheEventListener() { + for (CacheStoreType cacheStoreType : CacheStoreType.values()) { + enumMap.put(cacheStoreType, new TestStatsHolder()); + } + } + + @Override + public void onMiss(K key, CacheStoreType cacheStoreType) { + enumMap.get(cacheStoreType).missCount.inc(); + } + + @Override + public void onRemoval(StoreAwareCacheRemovalNotification notification) { + if (notification.getRemovalReason().equals(RemovalReason.EVICTED)) { + enumMap.get(notification.getCacheStoreType()).evictionsMetric.inc(); + } else if (notification.getRemovalReason().equals(RemovalReason.INVALIDATED)) { + enumMap.get(notification.getCacheStoreType()).invalidationMetric.inc(); + } + } + + @Override + public void onHit(K key, V value, CacheStoreType cacheStoreType) { + enumMap.get(cacheStoreType).hitCount.inc(); + } + + @Override + public void onCached(K key, V value, CacheStoreType cacheStoreType) { + enumMap.get(cacheStoreType).cachedCount.inc(); + } + + class TestStatsHolder { + final CounterMetric evictionsMetric = new CounterMetric(); + final CounterMetric hitCount = new CounterMetric(); + final CounterMetric missCount = new CounterMetric(); + final CounterMetric cachedCount = new CounterMetric(); + final CounterMetric invalidationMetric = new CounterMetric(); + } + } + + private LoadAwareCacheLoader getLoadAwareCacheLoader() { + return new LoadAwareCacheLoader() { + boolean isLoaded = false; + + @Override + public String load(String key) { + isLoaded = true; + return UUID.randomUUID().toString(); + } + + @Override + public boolean isLoaded() { + return isLoaded; + } + }; + } + + private TieredSpilloverCache intializeTieredSpilloverCache( + int onHeapCacheSize, + int diksCacheSize, + StoreAwareCacheEventListener eventListener, + long diskDeliberateDelay + ) { + StoreAwareCacheBuilder diskCacheBuilder = new MockOnDiskCache.Builder().setMaxSize(diksCacheSize) + .setDeliberateDelay(diskDeliberateDelay); + 
StoreAwareCacheBuilder onHeapCacheBuilder = new OpenSearchOnHeapCache.Builder() + .setMaximumWeightInBytes(onHeapCacheSize * 20) + .setWeigher((k, v) -> 20); // Will support upto onHeapCacheSize entries + return new TieredSpilloverCache.Builder().setOnHeapCacheBuilder(onHeapCacheBuilder) + .setOnDiskCacheBuilder(diskCacheBuilder) + .setListener(eventListener) + .build(); + } +} + +class MockOnDiskCache implements StoreAwareCache { + + Map cache; + int maxSize; + + long delay; + StoreAwareCacheEventListener eventListener; + + MockOnDiskCache(int maxSize, StoreAwareCacheEventListener eventListener, long delay) { + this.maxSize = maxSize; + this.eventListener = eventListener; + this.delay = delay; + this.cache = new ConcurrentHashMap(); + } + + @Override + public V get(K key) { + V value = cache.get(key); + if (value != null) { + eventListener.onHit(key, value, CacheStoreType.DISK); + } else { + eventListener.onMiss(key, CacheStoreType.DISK); + } + return value; + } + + @Override + public void put(K key, V value) { + if (this.cache.size() >= maxSize) { // For simplification + eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, value, RemovalReason.EVICTED, CacheStoreType.DISK)); + return; + } + try { + Thread.sleep(delay); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + this.cache.put(key, value); + eventListener.onCached(key, value, CacheStoreType.DISK); + } + + @Override + public V computeIfAbsent(K key, LoadAwareCacheLoader loader) throws Exception { + V value = cache.computeIfAbsent(key, key1 -> { + try { + return loader.load(key); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + if (!loader.isLoaded()) { + eventListener.onHit(key, value, CacheStoreType.DISK); + } else { + eventListener.onMiss(key, CacheStoreType.DISK); + eventListener.onCached(key, value, CacheStoreType.DISK); + } + return value; + } + + @Override + public void invalidate(K key) { + if (this.cache.containsKey(key)) { + eventListener.onRemoval(new StoreAwareCacheRemovalNotification<>(key, null, RemovalReason.INVALIDATED, CacheStoreType.DISK)); + } + this.cache.remove(key); + } + + @Override + public void invalidateAll() { + this.cache.clear(); + } + + @Override + public Iterable keys() { + return this.cache.keySet(); + } + + @Override + public long count() { + return this.cache.size(); + } + + @Override + public void refresh() {} + + @Override + public CacheStoreType getTierType() { + return CacheStoreType.DISK; + } + + public static class Builder extends StoreAwareCacheBuilder { + + int maxSize; + long delay; + + @Override + public StoreAwareCache build() { + return new MockOnDiskCache(maxSize, this.getEventListener(), delay); + } + + public Builder setMaxSize(int maxSize) { + this.maxSize = maxSize; + return this; + } + + public Builder setDeliberateDelay(long millis) { + this.delay = millis; + return this; + } + } +} From a9ce180b57dc20408e5a36337dbdc85d6e1fd306 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 9 Jan 2024 11:27:06 -0800 Subject: [PATCH 53/81] Unmute testNoFailuresOnFileReads flaky test (#11824) Signed-off-by: Suraj Singh --- .../java/org/opensearch/index/shard/RemoteIndexShardTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index eacc504428ef1..57a561bc8f2a3 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ 
b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -465,7 +465,6 @@ public void onReplicationFailure( * blocking update of reader. Once this is done, it corrupts one segment file and ensure that file is deleted in next * round of segment replication by ensuring doc count. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10885") public void testNoFailuresOnFileReads() throws Exception { try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); From 4a42150ba0cbf10022797c038f35faaea2f548f6 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 9 Jan 2024 18:17:47 -0800 Subject: [PATCH 54/81] Bump to Lucene99 (#11421) Upgraded Lucene dependency to 9.9.1. --------- Signed-off-by: Marc Handalian Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- libs/core/licenses/lucene-core-9.8.0.jar.sha1 | 1 - libs/core/licenses/lucene-core-9.9.1.jar.sha1 | 1 + .../src/main/java/org/opensearch/Version.java | 2 +- .../lucene-expressions-9.8.0.jar.sha1 | 1 - .../lucene-expressions-9.9.1.jar.sha1 | 1 + .../mapper/ScaledFloatFieldTypeTests.java | 31 ++++++++------- .../Netty4HttpPipeliningHandlerTests.java | 4 +- .../lucene-analysis-icu-9.8.0.jar.sha1 | 1 - .../lucene-analysis-icu-9.9.1.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.8.0.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.9.1.jar.sha1 | 1 + .../analysis/KuromojiTokenizerFactory.java | 2 +- .../lucene-analysis-nori-9.8.0.jar.sha1 | 1 - .../lucene-analysis-nori-9.9.1.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.8.0.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.9.1.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.8.0.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.9.1.jar.sha1 | 1 + .../lucene-analysis-stempel-9.8.0.jar.sha1 | 1 - .../lucene-analysis-stempel-9.9.1.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.8.0.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.9.1.jar.sha1 | 1 + .../index/codec/CorrelationCodecVersion.java | 12 +++--- .../CorrelationCodec.java | 4 +- .../PerFieldCorrelationVectorsFormat.java | 12 +++--- .../package-info.java | 2 +- .../services/org.apache.lucene.codecs.Codec | 2 +- .../CorrelationCodecTests.java | 6 +-- .../nio/NioHttpPipeliningHandlerTests.java | 4 +- .../lucene-analysis-common-9.8.0.jar.sha1 | 1 - .../lucene-analysis-common-9.9.1.jar.sha1 | 1 + .../lucene-backward-codecs-9.8.0.jar.sha1 | 1 - .../lucene-backward-codecs-9.9.1.jar.sha1 | 1 + server/licenses/lucene-core-9.8.0.jar.sha1 | 1 - server/licenses/lucene-core-9.9.1.jar.sha1 | 1 + .../licenses/lucene-grouping-9.8.0.jar.sha1 | 1 - .../licenses/lucene-grouping-9.9.1.jar.sha1 | 1 + .../lucene-highlighter-9.8.0.jar.sha1 | 1 - .../lucene-highlighter-9.9.1.jar.sha1 | 1 + server/licenses/lucene-join-9.8.0.jar.sha1 | 1 - server/licenses/lucene-join-9.9.1.jar.sha1 | 1 + server/licenses/lucene-memory-9.8.0.jar.sha1 | 1 - server/licenses/lucene-memory-9.9.1.jar.sha1 | 1 + server/licenses/lucene-misc-9.8.0.jar.sha1 | 1 - server/licenses/lucene-misc-9.9.1.jar.sha1 | 1 + server/licenses/lucene-queries-9.8.0.jar.sha1 | 1 - server/licenses/lucene-queries-9.9.1.jar.sha1 | 1 + .../lucene-queryparser-9.8.0.jar.sha1 | 1 - .../lucene-queryparser-9.9.1.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.8.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.9.1.jar.sha1 | 1 + .../lucene-spatial-extras-9.8.0.jar.sha1 | 1 - .../lucene-spatial-extras-9.9.1.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.8.0.jar.sha1 | 1 - 
.../licenses/lucene-spatial3d-9.9.1.jar.sha1 | 1 + server/licenses/lucene-suggest-9.8.0.jar.sha1 | 1 - server/licenses/lucene-suggest-9.9.1.jar.sha1 | 1 + .../index/store/CorruptedFileIT.java | 12 ++++++ .../validate/SimpleValidateQueryIT.java | 5 ++- .../lucene/queries/BlendedTermQuery.java | 3 +- .../search/grouping/CollapseTopFieldDocs.java | 3 +- .../search/BottomSortValuesCollector.java | 3 +- .../org/opensearch/common/lucene/Lucene.java | 2 +- .../org/opensearch/index/IndexModule.java | 4 +- .../opensearch/index/codec/CodecService.java | 12 +++--- .../PerFieldMappingPostingFormatCodec.java | 4 +- .../index/fielddata/IndexFieldData.java | 8 ++++ .../BytesRefFieldComparatorSource.java | 5 ++- .../DoubleValuesComparatorSource.java | 5 ++- .../FloatValuesComparatorSource.java | 5 ++- .../HalfFloatValuesComparatorSource.java | 5 ++- .../IntValuesComparatorSource.java | 5 ++- .../LongValuesComparatorSource.java | 5 ++- .../UnsignedLongValuesComparatorSource.java | 5 ++- .../index/mapper/CompletionFieldMapper.java | 4 +- .../comparators/HalfFloatComparator.java | 5 ++- .../comparators/UnsignedLongComparator.java | 5 ++- .../queries/SearchAfterSortedDocQuery.java | 3 +- .../bucket/BestBucketsDeferringCollector.java | 5 ++- .../bucket/composite/CompositeAggregator.java | 5 ++- .../search/internal/ContextIndexSearcher.java | 1 + .../search/sort/GeoDistanceSortBuilder.java | 5 ++- .../sort/SortedWiderNumericSortField.java | 7 ++-- .../BottomSortValuesCollectorTests.java | 3 +- .../opensearch/index/codec/CodecTests.java | 20 +++++----- .../engine/CompletionStatsCacheTests.java | 8 ++-- .../index/mapper/RangeFieldTypeTests.java | 4 +- .../query/GeoDistanceQueryBuilderTests.java | 5 ++- .../AggregationProcessorTests.java | 38 ++++++++++++++++--- .../metrics/InternalTopHitsTests.java | 3 +- .../internal/ContextIndexSearcherTests.java | 4 +- .../search/query/QueryPhaseTests.java | 3 +- .../search/query/QueryProfilePhaseTests.java | 3 +- .../searchafter/SearchAfterBuilderTests.java | 3 +- 96 files changed, 219 insertions(+), 136 deletions(-) delete mode 100644 libs/core/licenses/lucene-core-9.8.0.jar.sha1 create mode 100644 libs/core/licenses/lucene-core-9.9.1.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 
create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 rename plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/{correlation950 => correlation990}/CorrelationCodec.java (97%) rename plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/{correlation950 => correlation990}/PerFieldCorrelationVectorsFormat.java (77%) rename plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/{correlation950 => correlation990}/package-info.java (96%) rename plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/{correlation950 => correlation990}/CorrelationCodecTests.java (98%) delete mode 100644 server/licenses/lucene-analysis-common-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.9.1.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.8.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.9.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index d23a439c35d08..8e0317c6d5475 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -164,6 +164,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) - Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) - Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.0 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795)) +- Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/buildSrc/version.properties 
b/buildSrc/version.properties index 58b54e7b77a1e..eceb08c05953d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.8.0 +lucene = 9.9.1 bundled_jdk_vendor = adoptium bundled_jdk = 21.0.1+12 diff --git a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0.jar.sha1 deleted file mode 100644 index f9a3e2f3cbee6..0000000000000 --- a/libs/core/licenses/lucene-core-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/libs/core/licenses/lucene-core-9.9.1.jar.sha1 b/libs/core/licenses/lucene-core-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..ae596196d9e6a --- /dev/null +++ b/libs/core/licenses/lucene-core-9.9.1.jar.sha1 @@ -0,0 +1 @@ +55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index d94be3f25b53d..b2a8e619c9720 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -99,7 +99,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_9_1); public static final Version CURRENT = V_3_0_0; public static Version fromId(int id) { diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 deleted file mode 100644 index 892865a017f48..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7725476acfcb9bdfeff1b813ce15c39c6b857dc2 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..402cc36ba3d68 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.9.1.jar.sha1 @@ -0,0 +1 @@ +1782a69d0e83af9cc3c65db0dcd2e7e7c1e5f90e \ No newline at end of file diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java index d83811e6668eb..a653edbd05992 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldTypeTests.java @@ -135,35 +135,40 @@ public void testRangeQuery() throws IOException { public void testRoundsUpperBoundCorrectly() { ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType("scaled_float", 100); Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", 
getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 9]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 10]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9223372036854775808 TO 7999]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9223372036854775808 TO 7999]", getQueryString(scaledFloatQ)); } public void testRoundsLowerBoundCorrectly() { ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType("scaled_float", 100); Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-9 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, MOCK_QSC); - assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString()); + assertEquals("scaled_float:[-10 TO 9223372036854775807]", getQueryString(scaledFloatQ)); + } + + private String getQueryString(Query query) { + assertTrue(query instanceof IndexOrDocValuesQuery); + return ((IndexOrDocValuesQuery) query).getIndexQuery().toString(); } public void testValueForSearch() { diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java index 
743b51e979fb7..99d576bed01c7 100644 --- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java +++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpPipeliningHandlerTests.java @@ -79,7 +79,7 @@ public class Netty4HttpPipeliningHandlerTests extends OpenSearchTestCase { @After public void tearDown() throws Exception { waitingRequests.keySet().forEach(this::finishRequest); - shutdownExecutorService(); + shutdownExecutorServices(); super.tearDown(); } @@ -88,7 +88,7 @@ private CountDownLatch finishRequest(String url) { return finishingRequests.get(url); } - private void shutdownExecutorService() throws InterruptedException { + private void shutdownExecutorServices() throws InterruptedException { if (!handlerService.isShutdown()) { handlerService.shutdown(); handlerService.awaitTermination(10, TimeUnit.SECONDS); diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 deleted file mode 100644 index ef410899981ca..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7133d34e92770f59eb28686f4d511b9f3f32e970 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..dde9b7c100dc7 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.9.1.jar.sha1 @@ -0,0 +1 @@ +147cb42a90a29501d9ca6094ea0db1d213f3076a \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 deleted file mode 100644 index 46b83c9e40b3a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be44282e1f6b91a0650fcceb558053d6bdd4863d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..b70a22e9db096 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.9.1.jar.sha1 @@ -0,0 +1 @@ +b034dd3a975763e083c7e11b5d0f7d516ab72590 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java index 2939711f6f7e1..76b109932e642 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java @@ -38,7 +38,7 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; -import org.apache.lucene.analysis.ja.util.CSVUtil; +import org.apache.lucene.analysis.util.CSVUtil; import org.opensearch.OpenSearchException; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 deleted file mode 100644 index 36664695a7818..0000000000000 --- 
a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd1f80d33346f7e588685484ef29a304db5190e4 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..323f165c62790 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.9.1.jar.sha1 @@ -0,0 +1 @@ +c405f2f7d0fc127d88dfbadd753469b2028fdf52 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 deleted file mode 100644 index 003ccdf8b0727..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9ffdc7a52d2087ecb03318ec06305b480cdfe82 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..dd659ddf4de95 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.9.1.jar.sha1 @@ -0,0 +1 @@ +970e5775876c2d7e1b9af7421a4b17d96f63faf4 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 deleted file mode 100644 index e22eaa474016f..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f73e2007b133fb699e517ef13b4952844f0150d8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..ed0e81d8f1f75 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.9.1.jar.sha1 @@ -0,0 +1 @@ +2421e5238e9b8484929291744d709dd743c01da1 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 deleted file mode 100644 index 1ebe42a2a2f56..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2c09cbc021a8f81a01600a1d2a999361e70f7aed \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..fd8e000088180 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.9.1.jar.sha1 @@ -0,0 +1 @@ +a23e7de4cd9ae7af285c89dc1c55e0ac3f157fd3 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 deleted file mode 100644 index 3c4523d45c0f5..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b054f2c7b11fc7c5601b4c3cdf18aa7508612898 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 new file mode 100644 index 
0000000000000..d0e7a3b0c751c --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.9.1.jar.sha1 @@ -0,0 +1 @@ +8d9bce1ea51db279878c51091dd9aefc7b335da4 \ No newline at end of file diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java index 5e2cb8bfbc03a..3fcc995fb4199 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/CorrelationCodecVersion.java @@ -9,10 +9,10 @@ package org.opensearch.plugin.correlation.core.index.codec; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.index.mapper.MapperService; -import org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec; -import org.opensearch.plugin.correlation.core.index.codec.correlation950.PerFieldCorrelationVectorsFormat; +import org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec; +import org.opensearch.plugin.correlation.core.index.codec.correlation990.PerFieldCorrelationVectorsFormat; import java.util.Optional; import java.util.function.BiFunction; @@ -24,15 +24,15 @@ * @opensearch.internal */ public enum CorrelationCodecVersion { - V_9_5_0( + V_9_9_0( "CorrelationCodec", - new Lucene95Codec(), + new Lucene99Codec(), new PerFieldCorrelationVectorsFormat(Optional.empty()), (userCodec, mapperService) -> new CorrelationCodec(userCodec, new PerFieldCorrelationVectorsFormat(Optional.of(mapperService))), CorrelationCodec::new ); - private static final CorrelationCodecVersion CURRENT = V_9_5_0; + private static final CorrelationCodecVersion CURRENT = V_9_9_0; private final String codecName; private final Codec defaultCodecDelegate; private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java similarity index 97% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java rename to plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java index f91ba429fbea9..022972e2e06c3 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodec.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodec.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FilterCodec; @@ -19,7 +19,7 @@ * @opensearch.internal */ public class CorrelationCodec extends FilterCodec { - private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_5_0; + private static final CorrelationCodecVersion VERSION = CorrelationCodecVersion.V_9_9_0; private final PerFieldCorrelationVectorsFormat perFieldCorrelationVectorsFormat; /** diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java similarity index 77% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java rename to plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java index f6862ecc17736..89cc0b614a1a5 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/PerFieldCorrelationVectorsFormat.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/PerFieldCorrelationVectorsFormat.java @@ -6,9 +6,9 @@ * compatible open source license. */ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; -import org.apache.lucene.codecs.lucene95.Lucene95HnswVectorsFormat; +import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; import org.opensearch.index.mapper.MapperService; import org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat; @@ -26,10 +26,10 @@ public class PerFieldCorrelationVectorsFormat extends BasePerFieldCorrelationVec public PerFieldCorrelationVectorsFormat(final Optional mapperService) { super( mapperService, - Lucene95HnswVectorsFormat.DEFAULT_MAX_CONN, - Lucene95HnswVectorsFormat.DEFAULT_BEAM_WIDTH, - Lucene95HnswVectorsFormat::new, - Lucene95HnswVectorsFormat::new + Lucene99HnswVectorsFormat.DEFAULT_MAX_CONN, + Lucene99HnswVectorsFormat.DEFAULT_BEAM_WIDTH, + Lucene99HnswVectorsFormat::new, + Lucene99HnswVectorsFormat::new ); } } diff --git a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java similarity index 96% rename from plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java rename to plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java index b4dad34d2718e..fc2a9de58a73a 100644 --- a/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/package-info.java +++ b/plugins/events-correlation-engine/src/main/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/package-info.java @@ -9,4 +9,4 
@@ /** * custom Lucene9.5 codec package for events-correlation-engine */ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; diff --git a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec index 598a3b6af73c2..013c17e4a9736 100644 --- a/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec +++ b/plugins/events-correlation-engine/src/main/resources/META-INF/services/org.apache.lucene.codecs.Codec @@ -1 +1 @@ -org.opensearch.plugin.correlation.core.index.codec.correlation950.CorrelationCodec +org.opensearch.plugin.correlation.core.index.codec.correlation990.CorrelationCodec diff --git a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java similarity index 98% rename from plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java rename to plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java index b93172537d419..7223b450a136c 100644 --- a/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation950/CorrelationCodecTests.java +++ b/plugins/events-correlation-engine/src/test/java/org/opensearch/plugin/correlation/core/index/codec/correlation990/CorrelationCodecTests.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.plugin.correlation.core.index.codec.correlation950; +package org.opensearch.plugin.correlation.core.index.codec.correlation990; import org.apache.lucene.codecs.Codec; import org.apache.lucene.document.Document; @@ -32,7 +32,7 @@ import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_EF_CONSTRUCTION; import static org.opensearch.plugin.correlation.core.index.codec.BasePerFieldCorrelationVectorsFormat.METHOD_PARAMETER_M; -import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_5_0; +import static org.opensearch.plugin.correlation.core.index.codec.CorrelationCodecVersion.V_9_9_0; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -56,7 +56,7 @@ public void testCorrelationVectorIndex() throws Exception { Function perFieldCorrelationVectorsProvider = mapperService -> new PerFieldCorrelationVectorsFormat(Optional.of(mapperService)); Function correlationCodecProvider = (correlationVectorsFormat -> new CorrelationCodec( - V_9_5_0.getDefaultCodecDelegate(), + V_9_9_0.getDefaultCodecDelegate(), correlationVectorsFormat )); testCorrelationVectorIndex(correlationCodecProvider, perFieldCorrelationVectorsProvider); diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java index 46cf6ae708d1c..d0c0406bd7774 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpPipeliningHandlerTests.java @@ -80,7 +80,7 @@ public class NioHttpPipeliningHandlerTests extends OpenSearchTestCase { @After public void cleanup() throws Exception { waitingRequests.keySet().forEach(this::finishRequest); - shutdownExecutorService(); + shutdownExecutorServices(); } private CountDownLatch finishRequest(String url) { @@ -88,7 +88,7 @@ private CountDownLatch finishRequest(String url) { return finishingRequests.get(url); } - private void shutdownExecutorService() throws InterruptedException { + private void shutdownExecutorServices() throws InterruptedException { if (!handlerService.isShutdown()) { handlerService.shutdown(); handlerService.awaitTermination(10, TimeUnit.SECONDS); diff --git a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 deleted file mode 100644 index 6ad304fa52c12..0000000000000 --- a/server/licenses/lucene-analysis-common-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -36f0363325ca7bf62c180160d1ed5165c7c37795 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 b/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..c9e6120da7497 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.9.1.jar.sha1 @@ -0,0 +1 @@ +24c8401b530308f9568eb7b408c2029c63f564c6 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 deleted file mode 100644 index f104c4207d390..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e98fb408028f40170e6d87c16422bfdc0bb2e392 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 
b/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..69ecf6aa68200 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.9.1.jar.sha1 @@ -0,0 +1 @@ +11c46007366bb037be7d271ab0a5849b1d544662 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.8.0.jar.sha1 b/server/licenses/lucene-core-9.8.0.jar.sha1 deleted file mode 100644 index f9a3e2f3cbee6..0000000000000 --- a/server/licenses/lucene-core-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e8421c5f8573bcf22e9265fc7e19469545a775a \ No newline at end of file diff --git a/server/licenses/lucene-core-9.9.1.jar.sha1 b/server/licenses/lucene-core-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..ae596196d9e6a --- /dev/null +++ b/server/licenses/lucene-core-9.9.1.jar.sha1 @@ -0,0 +1 @@ +55249fa9a0ed321adcf8283c6f3b649a6812b0a9 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.8.0.jar.sha1 b/server/licenses/lucene-grouping-9.8.0.jar.sha1 deleted file mode 100644 index ab132121b2edc..0000000000000 --- a/server/licenses/lucene-grouping-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d39184518351178c404ed9669fc6cb6111f2288d \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.9.1.jar.sha1 b/server/licenses/lucene-grouping-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..e7df056400661 --- /dev/null +++ b/server/licenses/lucene-grouping-9.9.1.jar.sha1 @@ -0,0 +1 @@ +2f2785e17c5c823cc8f41a7ddb4647aaca8ee773 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0.jar.sha1 deleted file mode 100644 index c7cb678fb7b72..0000000000000 --- a/server/licenses/lucene-highlighter-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1ac38c8278dbd63dfab30744a41dd955a415a31c \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.9.1.jar.sha1 b/server/licenses/lucene-highlighter-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..828c7294aa586 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.9.1.jar.sha1 @@ -0,0 +1 @@ +30928513461bf79a5cb057e84da7d34a1e53227d \ No newline at end of file diff --git a/server/licenses/lucene-join-9.8.0.jar.sha1 b/server/licenses/lucene-join-9.8.0.jar.sha1 deleted file mode 100644 index 2b6cb8af4faf6..0000000000000 --- a/server/licenses/lucene-join-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d64fc57bb6e718d906413a9f73c713e6d4d8bb0 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.9.1.jar.sha1 b/server/licenses/lucene-join-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..34b44ca8c6ad5 --- /dev/null +++ b/server/licenses/lucene-join-9.9.1.jar.sha1 @@ -0,0 +1 @@ +b9c8cc99632280148f92b4c0a64111c482d5d0ac \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.8.0.jar.sha1 b/server/licenses/lucene-memory-9.8.0.jar.sha1 deleted file mode 100644 index 5fdfee401dd0a..0000000000000 --- a/server/licenses/lucene-memory-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5283ac71d6ccecb5e00c7b52df2faec012f2625a \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.9.1.jar.sha1 b/server/licenses/lucene-memory-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..b75fba4c331e9 --- /dev/null +++ b/server/licenses/lucene-memory-9.9.1.jar.sha1 @@ -0,0 +1 @@ +49f820b1b321860fa42a4f7583e8ed8f77b9c1c2 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.8.0.jar.sha1 b/server/licenses/lucene-misc-9.8.0.jar.sha1 deleted file mode 100644 index 
cf815cba15862..0000000000000 --- a/server/licenses/lucene-misc-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9a57b049cf51a5e9c9c1909c420f645f1b6f9a54 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.9.1.jar.sha1 b/server/licenses/lucene-misc-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..f1e1e056004e9 --- /dev/null +++ b/server/licenses/lucene-misc-9.9.1.jar.sha1 @@ -0,0 +1 @@ +db7c30217602dfcda394a4d0f0a9e68140d385a6 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.8.0.jar.sha1 b/server/licenses/lucene-queries-9.8.0.jar.sha1 deleted file mode 100644 index 09f369ef18e12..0000000000000 --- a/server/licenses/lucene-queries-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -628db4ef46f1c6a05145bdac1d1bc4ace6341b13 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.9.1.jar.sha1 b/server/licenses/lucene-queries-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..888b9b4a05ec8 --- /dev/null +++ b/server/licenses/lucene-queries-9.9.1.jar.sha1 @@ -0,0 +1 @@ +d157547bd24edc8e9d9d59c273107dc3ac5fde5e \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0.jar.sha1 deleted file mode 100644 index 2a42a8956b18b..0000000000000 --- a/server/licenses/lucene-queryparser-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -982faf2bfa55542bf57fbadef54c19ac00f57cae \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.9.1.jar.sha1 b/server/licenses/lucene-queryparser-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..1ce8a069a0f4e --- /dev/null +++ b/server/licenses/lucene-queryparser-9.9.1.jar.sha1 @@ -0,0 +1 @@ +12d844fe224f6f97c510ac20d68903ed7f626f6c \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0.jar.sha1 deleted file mode 100644 index 64a0b07f72d29..0000000000000 --- a/server/licenses/lucene-sandbox-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06493dbd14d02537716822254866a94458f4d842 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.9.1.jar.sha1 b/server/licenses/lucene-sandbox-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..14fd86dadc404 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.9.1.jar.sha1 @@ -0,0 +1 @@ +272e588fd3d8c0a401b28a1ac715f27044bf62ec \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 deleted file mode 100644 index d1bcb0581435c..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d9a731822ad6eefa1ba288a0c158d478522f165 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 b/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..0efd5a7595bfe --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.9.1.jar.sha1 @@ -0,0 +1 @@ +e066432e7ab02b2a4914f989bcd8c44adbf340ad \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 deleted file mode 100644 index d17459cc569a9..0000000000000 --- a/server/licenses/lucene-spatial3d-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ce752a52b2d4eac90633c7df7982e29504f99e76 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 b/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 new file mode 100644 index 
0000000000000..7f06466e4c721 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.9.1.jar.sha1 @@ -0,0 +1 @@ +fa54c9b962778e28ebc0efb9f75297781350361a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.8.0.jar.sha1 b/server/licenses/lucene-suggest-9.8.0.jar.sha1 deleted file mode 100644 index ff47b87672d2c..0000000000000 --- a/server/licenses/lucene-suggest-9.8.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f977f96f2093b7fddea6b67caa2e1c5b10edebf6 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.9.1.jar.sha1 b/server/licenses/lucene-suggest-9.9.1.jar.sha1 new file mode 100644 index 0000000000000..06732480d1b6c --- /dev/null +++ b/server/licenses/lucene-suggest-9.9.1.jar.sha1 @@ -0,0 +1 @@ +9554de5b22ae7483b344b94a9a956960b7a5d49c \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 8291fef5d177b..f46f413f4d23f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -710,6 +710,7 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro final NodeStats primaryNode = dataNodeStats.get(0); final NodeStats replicaNode = dataNodeStats.get(1); + assertAcked( prepareCreate("test").setSettings( Settings.builder() @@ -795,6 +796,17 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro // Assert the cluster returns to green status because the replica will be promoted to primary ensureGreen(); + + // After Lucene 9.9 check index will flag corruption with old (not the latest) commit points. + // For this test our previous corrupt commit "segments_2" will remain on the primary. + // This asserts this is the case, and then resets check index status. + assertEquals("Check index has a single failure", 1, checkIndexFailures.size()); + assertTrue( + checkIndexFailures.get(0) + .getMessage() + .contains("could not read old (not latest) commit point segments file \"segments_2\" in directory") + ); + resetCheckIndexStatus(); } private int numShards(String... 
index) { diff --git a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java index e619b93cdd93b..4ac2e1669ca67 100644 --- a/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/validate/SimpleValidateQueryIT.java @@ -270,7 +270,10 @@ public void testExplainDateRangeInQueryString() { long twoMonthsAgo = now.minus(2, ChronoUnit.MONTHS).truncatedTo(ChronoUnit.DAYS).toEpochSecond() * 1000; long rangeEnd = (now.plus(1, ChronoUnit.DAYS).truncatedTo(ChronoUnit.DAYS).toEpochSecond() * 1000) - 1; - assertThat(response.getQueryExplanation().get(0).getExplanation(), equalTo("past:[" + twoMonthsAgo + " TO " + rangeEnd + "]")); + assertThat( + response.getQueryExplanation().get(0).getExplanation(), + containsString("past:[" + twoMonthsAgo + " TO " + rangeEnd + "]") + ); assertThat(response.isValid(), equalTo(true)); } diff --git a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java index 345be330f048c..b47b974b96fed 100644 --- a/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java +++ b/server/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java @@ -100,11 +100,10 @@ public Query rewrite(IndexSearcher searcher) throws IOException { return rewritten; } IndexReader reader = searcher.getIndexReader(); - IndexReaderContext context = reader.getContext(); TermStates[] ctx = new TermStates[terms.length]; int[] docFreqs = new int[ctx.length]; for (int i = 0; i < terms.length; i++) { - ctx[i] = TermStates.build(context, terms[i], true); + ctx[i] = TermStates.build(searcher, terms[i], true); docFreqs[i] = ctx[i].docFreq(); } diff --git a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java index e93e5cdcc3f7b..961587113173d 100644 --- a/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java +++ b/server/src/main/java/org/apache/lucene/search/grouping/CollapseTopFieldDocs.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -144,7 +145,7 @@ private static class MergeSortQueue extends PriorityQueue { reverseMul = new int[sortFields.length]; for (int compIDX = 0; compIDX < sortFields.length; compIDX++) { final SortField sortField = sortFields[compIDX]; - comparators[compIDX] = sortField.getComparator(1, false); + comparators[compIDX] = sortField.getComparator(1, Pruning.NONE); reverseMul[compIDX] = sortField.getReverse() ? 
-1 : 1; } } diff --git a/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java b/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java index c831c80b6455c..bce8d9fb2b1ca 100644 --- a/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java +++ b/server/src/main/java/org/opensearch/action/search/BottomSortValuesCollector.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.opensearch.search.DocValueFormat; @@ -59,7 +60,7 @@ class BottomSortValuesCollector { this.reverseMuls = new int[sortFields.length]; this.sortFields = sortFields; for (int i = 0; i < sortFields.length; i++) { - comparators[i] = sortFields[i].getComparator(1, false); + comparators[i] = sortFields[i].getComparator(1, Pruning.NONE); reverseMuls[i] = sortFields[i].getReverse() ? -1 : 1; } } diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 5b521081ac63b..2c7b6b552b43f 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -110,7 +110,7 @@ * @opensearch.internal */ public class Lucene { - public static final String LATEST_CODEC = "Lucene95"; + public static final String LATEST_CODEC = "Lucene99"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 545623287bae8..6ac10a221d49e 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -161,7 +161,7 @@ public final class IndexModule { /** Which lucene file extensions to load with the mmap directory when using hybridfs store. This settings is ignored if {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} is set. * This is an expert setting. - * @see Lucene File Extensions. + * @see Lucene File Extensions. * * @deprecated This setting will be removed in OpenSearch 3.x. Use {@link #INDEX_STORE_HYBRID_NIO_EXTENSIONS} instead. */ @@ -206,7 +206,7 @@ public Iterator> settings() { /** Which lucene file extensions to load with nio. All others will default to mmap. Takes precedence over {@link #INDEX_STORE_HYBRID_MMAP_EXTENSIONS}. * This is an expert setting. - * @see Lucene File Extensions. + * @see Lucene File Extensions. 
*/ public static final Setting> INDEX_STORE_HYBRID_NIO_EXTENSIONS = Setting.listSetting( "index.store.hybrid.nio.extensions", diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index 775fc88b504f5..67f38536a0d11 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; -import org.apache.lucene.codecs.lucene95.Lucene95Codec.Mode; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.IndexSettings; @@ -68,10 +68,10 @@ public CodecService(@Nullable MapperService mapperService, IndexSettings indexSe final MapBuilder codecs = MapBuilder.newMapBuilder(); assert null != indexSettings; if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene95Codec()); - codecs.put(LZ4, new Lucene95Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); - codecs.put(ZLIB, new Lucene95Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene99Codec()); + codecs.put(LZ4, new Lucene99Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene99Codec(Mode.BEST_COMPRESSION)); + codecs.put(ZLIB, new Lucene99Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(LZ4, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index d3207557273a5..dc28ad2d6dc07 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -37,7 +37,7 @@ import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MappedFieldType; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene95Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene99Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java index 6a79679cc2f09..a63e1d418e3ba 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java +++ b/server/src/main/java/org/opensearch/index/fielddata/IndexFieldData.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparatorSource; import org.apache.lucene.search.IndexSearcher; +import 
org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -150,6 +151,13 @@ public void disableSkipping() { this.enableSkipping = false; } + protected Pruning filterPruning(Pruning pruning) { + if (this.enableSkipping) { + return pruning; + } + return Pruning.NONE; + } + /** * Simple wrapper class around a filter that matches parent documents * and a filter that matches child documents. For every root document R, diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java index 3aa4d9cb782ca..4c6eba2f0c7ec 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/BytesRefFieldComparatorSource.java @@ -38,6 +38,7 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.TermOrdValComparator; @@ -94,7 +95,7 @@ protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOEx protected void setScorer(Scorable scorer, LeafReaderContext context) {} @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final boolean sortMissingLast = sortMissingLast(missingValue) ^ reversed; @@ -105,7 +106,7 @@ public FieldComparator newComparator(String fieldname, int numHits, boolean e indexFieldData.getFieldName(), sortMissingLast, reversed, - enableSkipping + filterPruning(pruning) ) { @Override diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java index 4f9f05addc8c7..fd4fac3877f2f 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/DoubleValuesComparatorSource.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.DoubleComparator; @@ -98,11 +99,11 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, doubl protected void setScorer(Scorable scorer, LeafReaderContext context) {} @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final double dMissingValue = (Double) missingObject(missingValue, reversed); - return new 
DoubleComparator(numHits, fieldname, dMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new DoubleComparator(numHits, fieldname, dMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new DoubleLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java index 3fb8b5c6ce393..b106ba268bcb9 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/FloatValuesComparatorSource.java @@ -36,6 +36,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.FloatComparator; @@ -91,11 +92,11 @@ private NumericDoubleValues getNumericDocValues(LeafReaderContext context, float } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final float fMissingValue = (Float) missingObject(missingValue, reversed); - return new FloatComparator(numHits, fieldname, fMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new FloatComparator(numHits, fieldname, fMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new FloatLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java index e0227083f4181..e2e56dcb14fdf 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/HalfFloatValuesComparatorSource.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.util.BitSet; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; @@ -42,11 +43,11 @@ public HalfFloatValuesComparatorSource( } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final float fMissingValue = (Float) missingObject(missingValue, reversed); - return new HalfFloatComparator(numHits, fieldname, fMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new HalfFloatComparator(numHits, fieldname, fMissingValue, reversed, filterPruning(pruning)) { 
@Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new HalfFloatLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java index 3a3fdb85c3415..8f540cc6ae9d9 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/IntValuesComparatorSource.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.IntComparator; import org.apache.lucene.util.BitSet; @@ -70,11 +71,11 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, int miss } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final int iMissingValue = (Integer) missingObject(missingValue, reversed); - return new IntComparator(numHits, fieldname, iMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new IntComparator(numHits, fieldname, iMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new IntLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java index 57568aa84fa5c..3666cd8d6dfea 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/LongValuesComparatorSource.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.BitSet; @@ -114,11 +115,11 @@ private NumericDocValues getNumericDocValues(LeafReaderContext context, long mis } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final long lMissingValue = (Long) missingObject(missingValue, reversed); - return new LongComparator(numHits, fieldname, lMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new LongComparator(numHits, fieldname, lMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new LongLeafComparator(context) { diff --git 
a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index 9d1edde1fe08f..3714561b63e44 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -14,6 +14,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.opensearch.common.Nullable; @@ -88,11 +89,11 @@ public Object missingObject(Object missingValue, boolean reversed) { } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { assert indexFieldData == null || fieldname.equals(indexFieldData.getFieldName()); final BigInteger ulMissingValue = (BigInteger) missingObject(missingValue, reversed); - return new UnsignedLongComparator(numHits, fieldname, ulMissingValue, reversed, enableSkipping && this.enableSkipping) { + return new UnsignedLongComparator(numHits, fieldname, ulMissingValue, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new UnsignedLongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java index e1413fd9b4bbe..a9d9f6cb35fcb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/CompletionFieldMapper.java @@ -35,7 +35,7 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.suggest.document.Completion90PostingsFormat; +import org.apache.lucene.search.suggest.document.Completion99PostingsFormat; import org.apache.lucene.search.suggest.document.CompletionAnalyzer; import org.apache.lucene.search.suggest.document.CompletionQuery; import org.apache.lucene.search.suggest.document.FuzzyCompletionQuery; @@ -330,7 +330,7 @@ public ContextMappings getContextMappings() { */ public static synchronized PostingsFormat postingsFormat() { if (postingsFormat == null) { - postingsFormat = new Completion90PostingsFormat(); + postingsFormat = new Completion99PostingsFormat(); } return postingsFormat; } diff --git a/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java index 6244fa647b042..b2e2ba8001b88 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/HalfFloatComparator.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import 
org.apache.lucene.search.comparators.NumericComparator; import java.io.IOException; @@ -25,8 +26,8 @@ public class HalfFloatComparator extends NumericComparator { protected float topValue; protected float bottom; - public HalfFloatComparator(int numHits, String field, Float missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? missingValue : 0.0f, reverse, enableSkipping, HalfFloatPoint.BYTES); + public HalfFloatComparator(int numHits, String field, Float missingValue, boolean reverse, Pruning pruning) { + super(field, missingValue != null ? missingValue : 0.0f, reverse, pruning, HalfFloatPoint.BYTES); values = new float[numHits]; } diff --git a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java index d46b34fe97356..2b6bd9933e553 100644 --- a/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java +++ b/server/src/main/java/org/opensearch/index/search/comparators/UnsignedLongComparator.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.sandbox.document.BigIntegerPoint; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.comparators.NumericComparator; import org.opensearch.common.Numbers; @@ -23,8 +24,8 @@ public class UnsignedLongComparator extends NumericComparator { protected BigInteger topValue; protected BigInteger bottom; - public UnsignedLongComparator(int numHits, String field, BigInteger missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? missingValue : Numbers.MIN_UNSIGNED_LONG_VALUE, reverse, enableSkipping, BigIntegerPoint.BYTES); + public UnsignedLongComparator(int numHits, String field, BigInteger missingValue, boolean reverse, Pruning pruning) { + super(field, missingValue != null ? 
missingValue : Numbers.MIN_UNSIGNED_LONG_VALUE, reverse, pruning, BigIntegerPoint.BYTES); values = new BigInteger[numHits]; } diff --git a/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java b/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java index e417a2eaa7cf4..600ba5b5a92d8 100644 --- a/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java +++ b/server/src/main/java/org/opensearch/lucene/queries/SearchAfterSortedDocQuery.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.ScoreMode; @@ -77,7 +78,7 @@ public SearchAfterSortedDocQuery(Sort sort, FieldDoc after) { this.reverseMuls = new int[numFields]; for (int i = 0; i < numFields; i++) { SortField sortField = sort.getSort()[i]; - FieldComparator fieldComparator = sortField.getComparator(1, false); + FieldComparator fieldComparator = sortField.getComparator(1, Pruning.NONE); @SuppressWarnings("unchecked") FieldComparator comparator = (FieldComparator) fieldComparator; comparator.setTopValue(after.fields[i]); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index 2e7c4659bcb00..223be3ba2d1ae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -90,7 +90,7 @@ static class Entry { protected PackedLongValues.Builder docDeltasBuilder; protected PackedLongValues.Builder bucketsBuilder; protected long maxBucket = -1; - protected boolean finished = false; + protected boolean finished; protected LongHash selectedBuckets; /** @@ -101,6 +101,9 @@ static class Entry { public BestBucketsDeferringCollector(SearchContext context, boolean isGlobal) { this.searchContext = context; this.isGlobal = isGlobal; + // a postCollection call is not made by the IndexSearcher when there are no segments. + // In this case init the collector as finished. 
+ this.finished = context.searcher().getLeafContexts().isEmpty(); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index f2a4d5cd46127..317c2a357bac5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -45,6 +45,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Scorer; @@ -354,8 +355,8 @@ public int hashCode() { } @Override - public FieldComparator getComparator(int numHits, boolean enableSkipping) { - return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), false) { + public FieldComparator getComparator(int numHits, Pruning pruning) { + return new LongComparator(1, delegate.getField(), (Long) missingValue, delegate.getReverse(), Pruning.NONE) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new LongLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java index b042f3cf41d61..403b0b545c113 100644 --- a/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/opensearch/search/internal/ContextIndexSearcher.java @@ -102,6 +102,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { * The interval at which we check for search cancellation when we cannot use * a {@link CancellableBulkScorer}. See {@link #intersectScorerAndBitSet}. 
*/ + private static final int CHECK_CANCELLED_SCORER_INTERVAL = 1 << 11; private AggregatedDfs aggregatedDfs; diff --git a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java index c4bb54de2eedd..0499bba3245c6 100644 --- a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java @@ -38,6 +38,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.comparators.DoubleComparator; import org.apache.lucene.util.BitSet; @@ -734,8 +735,8 @@ private NumericDoubleValues getNumericDoubleValues(LeafReaderContext context) th } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { - return new DoubleComparator(numHits, null, null, reversed, enableSkipping) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { + return new DoubleComparator(numHits, null, null, reversed, filterPruning(pruning)) { @Override public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { return new DoubleLeafComparator(context) { diff --git a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java index bded2417ba6c1..10cc832fdb684 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java +++ b/server/src/main/java/org/opensearch/search/sort/SortedWiderNumericSortField.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.comparators.NumericComparator; @@ -44,13 +45,13 @@ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { * Creates and return a comparator, which always converts Numeric to double * and compare to support multi type comparison between numeric values * @param numHits number of top hits the queue will store - * @param enableSkipping true if the comparator can skip documents via {@link + * @param pruning controls how the comparator skips documents via {@link * LeafFieldComparator#competitiveIterator()} * @return NumericComparator */ @Override - public FieldComparator getComparator(int numHits, boolean enableSkipping) { - return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), enableSkipping, Double.BYTES) { + public FieldComparator getComparator(int numHits, Pruning pruning) { + return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), pruning, Double.BYTES) { @Override public int compare(int slot1, int slot2) { throw new UnsupportedOperationException(); diff --git a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java index 8042a7e296869..4f929a71429a6 100644 --- a/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/BottomSortValuesCollectorTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TotalHits; @@ -264,7 +265,7 @@ private Object[] newDateNanoArray(String... values) { private TopFieldDocs createTopDocs(SortField sortField, int totalHits, Object[] values) { FieldDoc[] fieldDocs = new FieldDoc[values.length]; - FieldComparator cmp = sortField.getComparator(1, false); + FieldComparator cmp = sortField.getComparator(1, Pruning.NONE); for (int i = 0; i < values.length; i++) { fieldDocs[i] = new FieldDoc(i, Float.NaN, new Object[] { values[i] }); } diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 6f9ff78aa0143..b31edd79411d0 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -35,7 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -68,28 +68,28 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(false); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene95Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene99Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService(false).codec("default"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService(false).codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); } public void testLZ4() throws Exception { Codec codec = createCodecService(false).codec("lz4"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); assert codec instanceof PerFieldMappingPostingFormatCodec; } public void testZlib() throws Exception { Codec codec = createCodecService(false).codec("zlib"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); assert codec instanceof PerFieldMappingPostingFormatCodec; } @@ -125,12 +125,12 @@ public void testLuceneCodecsWithCompressionLevel() { public void testDefaultMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("default"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_SPEED, codec); } public void 
testBestCompressionMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene99Codec.Mode.BEST_COMPRESSION, codec); } public void testExceptionCodecNull() { @@ -142,11 +142,11 @@ public void testExceptionIndexSettingsNull() { } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene99Codec.Mode expected, Codec actual) throws Exception { SegmentReader sr = getSegmentReader(actual); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene99Codec.Mode.valueOf(v)); } private CodecService createCodecService(boolean isMapperServiceNull) throws IOException { diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index b746d0ba8a56d..0c87c384e0749 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,14 +32,14 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.suggest.document.Completion90PostingsFormat; +import org.apache.lucene.search.suggest.document.Completion99PostingsFormat; import org.apache.lucene.search.suggest.document.SuggestField; import org.apache.lucene.store.Directory; import org.opensearch.OpenSearchException; @@ -69,8 +69,8 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); - final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene95Codec() { + final PostingsFormat postingsFormat = new Completion99PostingsFormat(); + indexWriterConfig.setCodec(new Lucene99Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java index 755d77c6ae392..00b48240d0567 100644 --- a/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/RangeFieldTypeTests.java @@ -277,12 +277,12 @@ public void testDateRangeQueryUsingMappingFormat() { RangeFieldType fieldType = new RangeFieldType("field", formatter); final Query query = fieldType.rangeQuery(from, to, true, true, relation, null, fieldType.dateMathParser(), context); - assertEquals("field:", query.toString()); + assertEquals("field:", 
((IndexOrDocValuesQuery) query).getIndexQuery().toString()); // compare lower and upper bounds with what we would get on a `date` field DateFieldType dateFieldType = new DateFieldType("field", DateFieldMapper.Resolution.MILLISECONDS, formatter); final Query queryOnDateField = dateFieldType.rangeQuery(from, to, true, true, relation, null, fieldType.dateMathParser(), context); - assertEquals("field:[1465975790000 TO 1466062190999]", queryOnDateField.toString()); + assertEquals("field:[1465975790000 TO 1466062190999]", ((IndexOrDocValuesQuery) queryOnDateField).getIndexQuery().toString()); } /** diff --git a/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java index 1b62fef30d255..0dfe47e83fedc 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoDistanceQueryBuilderTests.java @@ -358,7 +358,10 @@ private void assertGeoDistanceRangeQuery(String query, double lat, double lon, d Query parsedQuery = parseQuery(query).toQuery(createShardContext()); // The parsedQuery contains IndexOrDocValuesQuery, which wraps LatLonPointDistanceQuery which in turn has default visibility, // so we cannot access its fields directly to check and have to use toString() here instead. - assertEquals(parsedQuery.toString(), "mapped_geo_point:" + lat + "," + lon + " +/- " + distanceUnit.toMeters(distance) + " meters"); + assertEquals( + ((IndexOrDocValuesQuery) parsedQuery).getIndexQuery().toString(), + "mapped_geo_point:" + lat + "," + lon + " +/- " + distanceUnit.toMeters(distance) + " meters" + ); } public void testFromJson() throws IOException { diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java index 1ae201666bc54..d68b0911d3d01 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationProcessorTests.java @@ -8,10 +8,13 @@ package org.opensearch.search.aggregations; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Collector; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; +import org.opensearch.index.engine.Engine; import org.opensearch.search.aggregations.bucket.global.GlobalAggregator; import org.opensearch.search.internal.ContextIndexSearcher; import org.opensearch.search.profile.query.CollectorResult; @@ -20,14 +23,17 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.List; +import java.util.concurrent.ExecutorService; import org.mockito.ArgumentMatchers; import static org.mockito.ArgumentMatchers.nullable; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; public class AggregationProcessorTests extends AggregationSetupTests { private final AggregationProcessor testAggregationProcessor = new ConcurrentAggregationProcessor(); @@ -152,9 +158,31 @@ private void testPostProcessCommon( 
globalCollectors.add(context.queryCollectorManagers().get(GlobalAggCollectorManager.class).newCollector()); } } - final ContextIndexSearcher testSearcher = mock(ContextIndexSearcher.class); final IndexSearcher.LeafSlice[] slicesToReturn = new IndexSearcher.LeafSlice[numSlices]; - when(testSearcher.getSlices()).thenReturn(slicesToReturn); + + // Build a ContextIndexSearcher that stubs slices to return slicesToReturn. Slices is protected in IndexReader + // so this builds a real object. The DirectoryReader fetched to build the object is not used for any searches. + final DirectoryReader reader; + try (Engine.Searcher searcher = context.indexShard().acquireSearcher("test")) { + reader = searcher.getDirectoryReader(); + } + ContextIndexSearcher testSearcher = spy( + new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + randomBoolean(), + mock(ExecutorService.class), + context + ) { + @Override + protected LeafSlice[] slices(List leaves) { + return slicesToReturn; + } + } + ); + ((TestSearchContext) context).setSearcher(testSearcher); AggregationCollectorManager collectorManager; if (expectedNonGlobalAggsPerSlice > 0) { @@ -164,8 +192,8 @@ private void testPostProcessCommon( if (expectedGlobalAggs > 0) { collectorManager = (AggregationCollectorManager) context.queryCollectorManagers().get(GlobalAggCollectorManager.class); ReduceableSearchResult result = collectorManager.reduce(globalCollectors); - when(testSearcher.search(nullable(Query.class), ArgumentMatchers.>any())) - .thenReturn(result); + doReturn(result).when(testSearcher) + .search(nullable(Query.class), ArgumentMatchers.>any()); } assertTrue(context.queryResult().hasAggs()); if (withProfilers) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java index 9358fa568db6a..01e259f84660d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/InternalTopHitsTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.index.IndexWriter; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldDoc; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -356,7 +357,7 @@ private Comparator sortFieldsComparator(SortField[] sortFields) { FieldComparator[] comparators = new FieldComparator[sortFields.length]; for (int i = 0; i < sortFields.length; i++) { // Values passed to getComparator shouldn't matter - comparators[i] = sortFields[i].getComparator(0, false); + comparators[i] = sortFields[i].getComparator(0, Pruning.NONE); } return (lhs, rhs) -> { FieldDoc l = (FieldDoc) lhs; diff --git a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java index b1f70dfce176c..a707c8b34e0a4 100644 --- a/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/opensearch/search/internal/ContextIndexSearcherTests.java @@ -393,8 +393,8 @@ public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception { null, searchContext ); - // Case 1: Verify getSlices return null when concurrent 
segment search is disabled - assertNull(searcher.getSlices()); + // Case 1: Verify getSlices returns not null when concurrent segment search is disabled + assertEquals(1, searcher.getSlices().length); // Case 2: Verify the slice count when custom max slice computation is used searcher = new ContextIndexSearcher( diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 39126a607f968..d0e01c5461c79 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -69,6 +69,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; @@ -659,7 +660,7 @@ public void testIndexSortScrollOptimization() throws Exception { @SuppressWarnings("unchecked") FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( 1, - false + Pruning.NONE ); int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); if (cmp == 0) { diff --git a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java index 48ac2d3b5a804..6af04e15acef0 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryProfilePhaseTests.java @@ -35,6 +35,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; @@ -1086,7 +1087,7 @@ public void testIndexSortScrollOptimization() throws Exception { @SuppressWarnings("unchecked") FieldComparator comparator = (FieldComparator) searchSortAndFormat.sort.getSort()[i].getComparator( i, - randomBoolean() + randomFrom(Pruning.values()) ); int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]); if (cmp == 0) { diff --git a/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java b/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java index cc5c205f7f9fe..53c07674f5bd7 100644 --- a/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/searchafter/SearchAfterBuilderTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.search.FieldComparator; +import org.apache.lucene.search.Pruning; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSortField; @@ -279,7 +280,7 @@ public SortField.Type reducedType() { } @Override - public FieldComparator newComparator(String fieldname, int numHits, boolean enableSkipping, boolean reversed) { + public FieldComparator newComparator(String fieldname, int numHits, Pruning pruning, boolean reversed) { return null; } From 439573b36bc82ddb77d37bfa3d0cd6f85c20c0e2 Mon Sep 17 00:00:00 2001 From: Harsha Vamsi Kalluri Date: Wed, 10 Jan 2024 05:49:11 -0800 
Subject: [PATCH 55/81] Update skip version to match branches (#11801) Signed-off-by: Harsha Vamsi Kalluri From 81924884053f76325389f80118d4876554a6f94a Mon Sep 17 00:00:00 2001 From: Cam <17013462+camerondurham@users.noreply.github.com> Date: Wed, 10 Jan 2024 06:54:41 -0700 Subject: [PATCH 56/81] Single line comments can end in newline or EOF. Fixes #11815 (#11816) * Single line comments can end in newline or EOF. Fixes #11815 Signed-off-by: camerondurham * Re-add license to autogen parser files Signed-off-by: camerondurham * Remove noise from changelog Signed-off-by: Cameron Durham * Accept single-line comments until newline char Signed-off-by: Cameron Durham --------- Signed-off-by: camerondurham Signed-off-by: Cameron Durham --- CHANGELOG.md | 1 + .../src/main/antlr/PainlessLexer.g4 | 2 +- .../painless/antlr/PainlessLexer.java | 748 +++++++++--------- .../org/opensearch/painless/CommentTests.java | 51 ++ 4 files changed, 427 insertions(+), 375 deletions(-) create mode 100644 modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e0317c6d5475..1e00e42e5b756 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -218,6 +218,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) - Fix issue when calling Delete PIT endpoint and no PITs exist ([#11711](https://github.com/opensearch-project/OpenSearch/pull/11711)) - Fix tracing context propagation for local transport instrumentation ([#11490](https://github.com/opensearch-project/OpenSearch/pull/11490)) +- Fix parsing of single line comments in `lang-painless` ([#11815](https://github.com/opensearch-project/OpenSearch/issues/11815)) ### Security diff --git a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 index 21b03b85d8edd..69b789dd2aa25 100644 --- a/modules/lang-painless/src/main/antlr/PainlessLexer.g4 +++ b/modules/lang-painless/src/main/antlr/PainlessLexer.g4 @@ -25,7 +25,7 @@ protected abstract boolean isSlashRegex(); } WS: [ \t\n\r]+ -> skip; -COMMENT: ( '//' .*? [\n\r] | '/*' .*? '*/' ) -> skip; +COMMENT: ( '//' ~[\n\r]* | '/*' .*? 
'*/' ) -> skip; LBRACK: '{'; RBRACK: '}'; diff --git a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java index fb33cf6e2a6f5..260a2fc0c062c 100644 --- a/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java +++ b/modules/lang-painless/src/main/java/org/opensearch/painless/antlr/PainlessLexer.java @@ -435,7 +435,7 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { return true; } - public static final String _serializedATN = "\u0004\u0000U\u0278\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + public static final String _serializedATN = "\u0004\u0000U\u0277\u0006\uffff\uffff\u0006\uffff\uffff\u0002\u0000\u0007" + "\u0000\u0002\u0001\u0007\u0001\u0002\u0002\u0007\u0002\u0002\u0003\u0007" + "\u0003\u0002\u0004\u0007\u0004\u0002\u0005\u0007\u0005\u0002\u0006\u0007" + "\u0006\u0002\u0007\u0007\u0007\u0002\b\u0007\b\u0002\t\u0007\t\u0002\n" @@ -459,381 +459,381 @@ private boolean REGEX_sempred(RuleContext _localctx, int predIndex) { + "R\u0007R\u0002S\u0007S\u0002T\u0007T\u0001\u0000\u0004\u0000\u00ae\b\u0000" + "\u000b\u0000\f\u0000\u00af\u0001\u0000\u0001\u0000\u0001\u0001\u0001\u0001" + "\u0001\u0001\u0001\u0001\u0005\u0001\u00b8\b\u0001\n\u0001\f\u0001\u00bb" - + "\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005" - + "\u0001\u00c2\b\u0001\n\u0001\f\u0001\u00c5\t\u0001\u0001\u0001\u0001\u0001" - + "\u0003\u0001\u00c9\b\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002" - + "\u0001\u0003\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005" - + "\u0001\u0006\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001" - + "\b\u0001\b\u0001\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001" - + "\u000b\u0001\u000b\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001" - + "\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001" - + "\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001" - + "\u0010\u0001\u0010\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001" - + "\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001" - + "\u0012\u0001\u0012\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001" - + "\u0013\u0001\u0013\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001" - + "\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001" - + "\u0015\u0001\u0015\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001" - + "\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001" - + "\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001" - + "\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001" - + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001" - + "\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001" - + "\u001c\u0001\u001c\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001" - + "\u001e\u0001\u001f\u0001\u001f\u0001 \u0001 \u0001!\u0001!\u0001\"\u0001" - + "\"\u0001\"\u0001#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0001%\u0001" - + "%\u0001&\u0001&\u0001&\u0001\'\u0001\'\u0001(\u0001(\u0001(\u0001)\u0001" - + ")\u0001)\u0001*\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0001,\u0001" - + ",\u0001,\u0001,\u0001-\u0001-\u0001.\u0001.\u0001/\u0001/\u00010\u0001" - + "0\u00010\u00011\u00011\u00011\u00012\u00012\u00013\u00013\u00014\u0001" - + 
"4\u00014\u00015\u00015\u00015\u00016\u00016\u00016\u00017\u00017\u0001" - + "7\u00018\u00018\u00018\u00018\u00019\u00019\u00019\u0001:\u0001:\u0001" - + ":\u0001;\u0001;\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001>\u0001" - + ">\u0001>\u0001?\u0001?\u0001?\u0001@\u0001@\u0001@\u0001A\u0001A\u0001" - + "A\u0001B\u0001B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001" - + "D\u0001E\u0001E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001" - + "G\u0001G\u0004G\u01b8\bG\u000bG\fG\u01b9\u0001G\u0003G\u01bd\bG\u0001" - + "H\u0001H\u0001H\u0004H\u01c2\bH\u000bH\fH\u01c3\u0001H\u0003H\u01c7\b" - + "H\u0001I\u0001I\u0001I\u0005I\u01cc\bI\nI\fI\u01cf\tI\u0003I\u01d1\bI" - + "\u0001I\u0003I\u01d4\bI\u0001J\u0001J\u0001J\u0005J\u01d9\bJ\nJ\fJ\u01dc" - + "\tJ\u0003J\u01de\bJ\u0001J\u0001J\u0004J\u01e2\bJ\u000bJ\fJ\u01e3\u0003" - + "J\u01e6\bJ\u0001J\u0001J\u0003J\u01ea\bJ\u0001J\u0004J\u01ed\bJ\u000b" - + "J\fJ\u01ee\u0003J\u01f1\bJ\u0001J\u0003J\u01f4\bJ\u0001K\u0001K\u0001" - + "K\u0001K\u0001K\u0001K\u0005K\u01fc\bK\nK\fK\u01ff\tK\u0001K\u0001K\u0001" - + "K\u0001K\u0001K\u0001K\u0001K\u0005K\u0208\bK\nK\fK\u020b\tK\u0001K\u0003" - + "K\u020e\bK\u0001L\u0001L\u0001L\u0001L\u0004L\u0214\bL\u000bL\fL\u0215" - + "\u0001L\u0001L\u0005L\u021a\bL\nL\fL\u021d\tL\u0001L\u0001L\u0001M\u0001" - + "M\u0001M\u0001M\u0001M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001" - + "O\u0001O\u0001O\u0001O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + + "\t\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0001\u0005\u0001\u00c1" + + "\b\u0001\n\u0001\f\u0001\u00c4\t\u0001\u0001\u0001\u0001\u0001\u0003\u0001" + + "\u00c8\b\u0001\u0001\u0001\u0001\u0001\u0001\u0002\u0001\u0002\u0001\u0003" + + "\u0001\u0003\u0001\u0004\u0001\u0004\u0001\u0005\u0001\u0005\u0001\u0006" + + "\u0001\u0006\u0001\u0007\u0001\u0007\u0001\b\u0001\b\u0001\b\u0001\b\u0001" + + "\t\u0001\t\u0001\t\u0001\t\u0001\t\u0001\n\u0001\n\u0001\u000b\u0001\u000b" + + "\u0001\f\u0001\f\u0001\f\u0001\r\u0001\r\u0001\r\u0001\u000e\u0001\u000e" + + "\u0001\u000e\u0001\u000e\u0001\u000e\u0001\u000f\u0001\u000f\u0001\u000f" + + "\u0001\u000f\u0001\u000f\u0001\u000f\u0001\u0010\u0001\u0010\u0001\u0010" + + "\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0011\u0001\u0012\u0001\u0012" + + "\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012\u0001\u0012" + + "\u0001\u0012\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013\u0001\u0013" + + "\u0001\u0013\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014\u0001\u0014" + + "\u0001\u0014\u0001\u0014\u0001\u0015\u0001\u0015\u0001\u0015\u0001\u0015" + + "\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0016\u0001\u0017\u0001\u0017" + + "\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0017\u0001\u0018\u0001\u0018" + + "\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0018\u0001\u0019\u0001\u0019" + + "\u0001\u0019\u0001\u0019\u0001\u0019\u0001\u001a\u0001\u001a\u0001\u001a" + + "\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a\u0001\u001a" + + "\u0001\u001a\u0001\u001a\u0001\u001b\u0001\u001b\u0001\u001c\u0001\u001c" + + "\u0001\u001d\u0001\u001d\u0001\u001e\u0001\u001e\u0001\u001e\u0001\u001f" + + "\u0001\u001f\u0001 \u0001 \u0001!\u0001!\u0001\"\u0001\"\u0001\"\u0001" + + "#\u0001#\u0001#\u0001$\u0001$\u0001$\u0001$\u0001%\u0001%\u0001&\u0001" + + "&\u0001&\u0001\'\u0001\'\u0001(\u0001(\u0001(\u0001)\u0001)\u0001)\u0001" + + "*\u0001*\u0001*\u0001*\u0001+\u0001+\u0001+\u0001,\u0001,\u0001,\u0001" + + ",\u0001-\u0001-\u0001.\u0001.\u0001/\u0001/\u00010\u00010\u00010\u0001" + + 
"1\u00011\u00011\u00012\u00012\u00013\u00013\u00014\u00014\u00014\u0001" + + "5\u00015\u00015\u00016\u00016\u00016\u00017\u00017\u00017\u00018\u0001" + + "8\u00018\u00018\u00019\u00019\u00019\u0001:\u0001:\u0001:\u0001;\u0001" + + ";\u0001<\u0001<\u0001<\u0001=\u0001=\u0001=\u0001>\u0001>\u0001>\u0001" + + "?\u0001?\u0001?\u0001@\u0001@\u0001@\u0001A\u0001A\u0001A\u0001B\u0001" + + "B\u0001B\u0001C\u0001C\u0001C\u0001D\u0001D\u0001D\u0001D\u0001E\u0001" + + "E\u0001E\u0001E\u0001F\u0001F\u0001F\u0001F\u0001F\u0001G\u0001G\u0004" + + "G\u01b7\bG\u000bG\fG\u01b8\u0001G\u0003G\u01bc\bG\u0001H\u0001H\u0001" + + "H\u0004H\u01c1\bH\u000bH\fH\u01c2\u0001H\u0003H\u01c6\bH\u0001I\u0001" + + "I\u0001I\u0005I\u01cb\bI\nI\fI\u01ce\tI\u0003I\u01d0\bI\u0001I\u0003I" + + "\u01d3\bI\u0001J\u0001J\u0001J\u0005J\u01d8\bJ\nJ\fJ\u01db\tJ\u0003J\u01dd" + + "\bJ\u0001J\u0001J\u0004J\u01e1\bJ\u000bJ\fJ\u01e2\u0003J\u01e5\bJ\u0001" + + "J\u0001J\u0003J\u01e9\bJ\u0001J\u0004J\u01ec\bJ\u000bJ\fJ\u01ed\u0003" + + "J\u01f0\bJ\u0001J\u0003J\u01f3\bJ\u0001K\u0001K\u0001K\u0001K\u0001K\u0001" + + "K\u0005K\u01fb\bK\nK\fK\u01fe\tK\u0001K\u0001K\u0001K\u0001K\u0001K\u0001" + + "K\u0001K\u0005K\u0207\bK\nK\fK\u020a\tK\u0001K\u0003K\u020d\bK\u0001L" + + "\u0001L\u0001L\u0001L\u0004L\u0213\bL\u000bL\fL\u0214\u0001L\u0001L\u0005" + + "L\u0219\bL\nL\fL\u021c\tL\u0001L\u0001L\u0001M\u0001M\u0001M\u0001M\u0001" + + "M\u0001N\u0001N\u0001N\u0001N\u0001N\u0001N\u0001O\u0001O\u0001O\u0001" + + "O\u0001O\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" - + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001" - + "P\u0001P\u0001P\u0003P\u0257\bP\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001" - + "R\u0005R\u025f\bR\nR\fR\u0262\tR\u0001S\u0001S\u0001S\u0005S\u0267\bS" - + "\nS\fS\u026a\tS\u0003S\u026c\bS\u0001S\u0001S\u0001T\u0001T\u0005T\u0272" - + "\bT\nT\fT\u0275\tT\u0001T\u0001T\u0005\u00b9\u00c3\u01fd\u0209\u0215\u0000" - + "U\u0002\u0001\u0004\u0002\u0006\u0003\b\u0004\n\u0005\f\u0006\u000e\u0007" - + "\u0010\b\u0012\t\u0014\n\u0016\u000b\u0018\f\u001a\r\u001c\u000e\u001e" - + "\u000f \u0010\"\u0011$\u0012&\u0013(\u0014*\u0015,\u0016.\u00170\u0018" - + "2\u00194\u001a6\u001b8\u001c:\u001d<\u001e>\u001f@ B!D\"F#H$J%L&N\'P(" - + "R)T*V+X,Z-\\.^/`0b1d2f3h4j5l6n7p8r9t:v;x~?\u0080@\u0082A\u0084B\u0086" - + "C\u0088D\u008aE\u008cF\u008eG\u0090H\u0092I\u0094J\u0096K\u0098L\u009a" - + "M\u009cN\u009eO\u00a0P\u00a2Q\u00a4R\u00a6S\u00a8T\u00aaU\u0002\u0000" - + "\u0001\u0013\u0003\u0000\t\n\r\r \u0002\u0000\n\n\r\r\u0001\u000007\u0002" - + "\u0000LLll\u0002\u0000XXxx\u0003\u000009AFaf\u0001\u000019\u0001\u0000" - + "09\u0006\u0000DDFFLLddffll\u0002\u0000EEee\u0002\u0000++--\u0004\u0000" - + "DDFFddff\u0002\u0000\"\"\\\\\u0002\u0000\'\'\\\\\u0001\u0000\n\n\u0002" - + "\u0000\n\n//\u0007\u0000UUcciilmssuuxx\u0003\u0000AZ__az\u0004\u00000" - + "9AZ__az\u029e\u0000\u0002\u0001\u0000\u0000\u0000\u0000\u0004\u0001\u0000" - + "\u0000\u0000\u0000\u0006\u0001\u0000\u0000\u0000\u0000\b\u0001\u0000\u0000" - + "\u0000\u0000\n\u0001\u0000\u0000\u0000\u0000\f\u0001\u0000\u0000\u0000" - + "\u0000\u000e\u0001\u0000\u0000\u0000\u0000\u0010\u0001\u0000\u0000\u0000" - + "\u0000\u0012\u0001\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000" - + "\u0000\u0016\u0001\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000" - + 
"\u0000\u001a\u0001\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000" - + "\u0000\u001e\u0001\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000" - + "\"\u0001\u0000\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001" - + "\u0000\u0000\u0000\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000" - + "\u0000\u0000,\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u0000" - + "0\u0001\u0000\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001" - + "\u0000\u0000\u0000\u00006\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000" - + "\u0000\u0000:\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000" - + ">\u0001\u0000\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001" - + "\u0000\u0000\u0000\u0000D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000" - + "\u0000\u0000H\u0001\u0000\u0000\u0000\u0000J\u0001\u0000\u0000\u0000\u0000" - + "L\u0001\u0000\u0000\u0000\u0000N\u0001\u0000\u0000\u0000\u0000P\u0001" - + "\u0000\u0000\u0000\u0000R\u0001\u0000\u0000\u0000\u0000T\u0001\u0000\u0000" - + "\u0000\u0000V\u0001\u0000\u0000\u0000\u0000X\u0001\u0000\u0000\u0000\u0000" - + "Z\u0001\u0000\u0000\u0000\u0000\\\u0001\u0000\u0000\u0000\u0000^\u0001" - + "\u0000\u0000\u0000\u0000`\u0001\u0000\u0000\u0000\u0000b\u0001\u0000\u0000" - + "\u0000\u0000d\u0001\u0000\u0000\u0000\u0000f\u0001\u0000\u0000\u0000\u0000" - + "h\u0001\u0000\u0000\u0000\u0000j\u0001\u0000\u0000\u0000\u0000l\u0001" - + "\u0000\u0000\u0000\u0000n\u0001\u0000\u0000\u0000\u0000p\u0001\u0000\u0000" - + "\u0000\u0000r\u0001\u0000\u0000\u0000\u0000t\u0001\u0000\u0000\u0000\u0000" - + "v\u0001\u0000\u0000\u0000\u0000x\u0001\u0000\u0000\u0000\u0000z\u0001" - + "\u0000\u0000\u0000\u0000|\u0001\u0000\u0000\u0000\u0000~\u0001\u0000\u0000" - + "\u0000\u0000\u0080\u0001\u0000\u0000\u0000\u0000\u0082\u0001\u0000\u0000" - + "\u0000\u0000\u0084\u0001\u0000\u0000\u0000\u0000\u0086\u0001\u0000\u0000" - + "\u0000\u0000\u0088\u0001\u0000\u0000\u0000\u0000\u008a\u0001\u0000\u0000" - + "\u0000\u0000\u008c\u0001\u0000\u0000\u0000\u0000\u008e\u0001\u0000\u0000" - + "\u0000\u0000\u0090\u0001\u0000\u0000\u0000\u0000\u0092\u0001\u0000\u0000" - + "\u0000\u0000\u0094\u0001\u0000\u0000\u0000\u0000\u0096\u0001\u0000\u0000" - + "\u0000\u0000\u0098\u0001\u0000\u0000\u0000\u0000\u009a\u0001\u0000\u0000" - + "\u0000\u0000\u009c\u0001\u0000\u0000\u0000\u0000\u009e\u0001\u0000\u0000" - + "\u0000\u0000\u00a0\u0001\u0000\u0000\u0000\u0000\u00a2\u0001\u0000\u0000" - + "\u0000\u0000\u00a4\u0001\u0000\u0000\u0000\u0000\u00a6\u0001\u0000\u0000" - + "\u0000\u0001\u00a8\u0001\u0000\u0000\u0000\u0001\u00aa\u0001\u0000\u0000" - + "\u0000\u0002\u00ad\u0001\u0000\u0000\u0000\u0004\u00c8\u0001\u0000\u0000" - + "\u0000\u0006\u00cc\u0001\u0000\u0000\u0000\b\u00ce\u0001\u0000\u0000\u0000" - + "\n\u00d0\u0001\u0000\u0000\u0000\f\u00d2\u0001\u0000\u0000\u0000\u000e" - + "\u00d4\u0001\u0000\u0000\u0000\u0010\u00d6\u0001\u0000\u0000\u0000\u0012" - + "\u00d8\u0001\u0000\u0000\u0000\u0014\u00dc\u0001\u0000\u0000\u0000\u0016" - + "\u00e1\u0001\u0000\u0000\u0000\u0018\u00e3\u0001\u0000\u0000\u0000\u001a" - + "\u00e5\u0001\u0000\u0000\u0000\u001c\u00e8\u0001\u0000\u0000\u0000\u001e" - + "\u00eb\u0001\u0000\u0000\u0000 \u00f0\u0001\u0000\u0000\u0000\"\u00f6" - + "\u0001\u0000\u0000\u0000$\u00f9\u0001\u0000\u0000\u0000&\u00fd\u0001\u0000" - + "\u0000\u0000(\u0106\u0001\u0000\u0000\u0000*\u010c\u0001\u0000\u0000\u0000" - + ",\u0113\u0001\u0000\u0000\u0000.\u0117\u0001\u0000\u0000\u00000\u011b" - + 
"\u0001\u0000\u0000\u00002\u0121\u0001\u0000\u0000\u00004\u0127\u0001\u0000" - + "\u0000\u00006\u012c\u0001\u0000\u0000\u00008\u0137\u0001\u0000\u0000\u0000" - + ":\u0139\u0001\u0000\u0000\u0000<\u013b\u0001\u0000\u0000\u0000>\u013d" - + "\u0001\u0000\u0000\u0000@\u0140\u0001\u0000\u0000\u0000B\u0142\u0001\u0000" - + "\u0000\u0000D\u0144\u0001\u0000\u0000\u0000F\u0146\u0001\u0000\u0000\u0000" - + "H\u0149\u0001\u0000\u0000\u0000J\u014c\u0001\u0000\u0000\u0000L\u0150" - + "\u0001\u0000\u0000\u0000N\u0152\u0001\u0000\u0000\u0000P\u0155\u0001\u0000" - + "\u0000\u0000R\u0157\u0001\u0000\u0000\u0000T\u015a\u0001\u0000\u0000\u0000" - + "V\u015d\u0001\u0000\u0000\u0000X\u0161\u0001\u0000\u0000\u0000Z\u0164" - + "\u0001\u0000\u0000\u0000\\\u0168\u0001\u0000\u0000\u0000^\u016a\u0001" - + "\u0000\u0000\u0000`\u016c\u0001\u0000\u0000\u0000b\u016e\u0001\u0000\u0000" - + "\u0000d\u0171\u0001\u0000\u0000\u0000f\u0174\u0001\u0000\u0000\u0000h" - + "\u0176\u0001\u0000\u0000\u0000j\u0178\u0001\u0000\u0000\u0000l\u017b\u0001" - + "\u0000\u0000\u0000n\u017e\u0001\u0000\u0000\u0000p\u0181\u0001\u0000\u0000" - + "\u0000r\u0184\u0001\u0000\u0000\u0000t\u0188\u0001\u0000\u0000\u0000v" - + "\u018b\u0001\u0000\u0000\u0000x\u018e\u0001\u0000\u0000\u0000z\u0190\u0001" - + "\u0000\u0000\u0000|\u0193\u0001\u0000\u0000\u0000~\u0196\u0001\u0000\u0000" - + "\u0000\u0080\u0199\u0001\u0000\u0000\u0000\u0082\u019c\u0001\u0000\u0000" - + "\u0000\u0084\u019f\u0001\u0000\u0000\u0000\u0086\u01a2\u0001\u0000\u0000" - + "\u0000\u0088\u01a5\u0001\u0000\u0000\u0000\u008a\u01a8\u0001\u0000\u0000" - + "\u0000\u008c\u01ac\u0001\u0000\u0000\u0000\u008e\u01b0\u0001\u0000\u0000" - + "\u0000\u0090\u01b5\u0001\u0000\u0000\u0000\u0092\u01be\u0001\u0000\u0000" - + "\u0000\u0094\u01d0\u0001\u0000\u0000\u0000\u0096\u01dd\u0001\u0000\u0000" - + "\u0000\u0098\u020d\u0001\u0000\u0000\u0000\u009a\u020f\u0001\u0000\u0000" - + "\u0000\u009c\u0220\u0001\u0000\u0000\u0000\u009e\u0225\u0001\u0000\u0000" - + "\u0000\u00a0\u022b\u0001\u0000\u0000\u0000\u00a2\u0256\u0001\u0000\u0000" - + "\u0000\u00a4\u0258\u0001\u0000\u0000\u0000\u00a6\u025c\u0001\u0000\u0000" - + "\u0000\u00a8\u026b\u0001\u0000\u0000\u0000\u00aa\u026f\u0001\u0000\u0000" - + "\u0000\u00ac\u00ae\u0007\u0000\u0000\u0000\u00ad\u00ac\u0001\u0000\u0000" - + "\u0000\u00ae\u00af\u0001\u0000\u0000\u0000\u00af\u00ad\u0001\u0000\u0000" - + "\u0000\u00af\u00b0\u0001\u0000\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000" - + "\u0000\u00b1\u00b2\u0006\u0000\u0000\u0000\u00b2\u0003\u0001\u0000\u0000" - + "\u0000\u00b3\u00b4\u0005/\u0000\u0000\u00b4\u00b5\u0005/\u0000\u0000\u00b5" - + "\u00b9\u0001\u0000\u0000\u0000\u00b6\u00b8\t\u0000\u0000\u0000\u00b7\u00b6" - + "\u0001\u0000\u0000\u0000\u00b8\u00bb\u0001\u0000\u0000\u0000\u00b9\u00ba" - + "\u0001\u0000\u0000\u0000\u00b9\u00b7\u0001\u0000\u0000\u0000\u00ba\u00bc" - + "\u0001\u0000\u0000\u0000\u00bb\u00b9\u0001\u0000\u0000\u0000\u00bc\u00c9" - + "\u0007\u0001\u0000\u0000\u00bd\u00be\u0005/\u0000\u0000\u00be\u00bf\u0005" - + "*\u0000\u0000\u00bf\u00c3\u0001\u0000\u0000\u0000\u00c0\u00c2\t\u0000" - + "\u0000\u0000\u00c1\u00c0\u0001\u0000\u0000\u0000\u00c2\u00c5\u0001\u0000" - + "\u0000\u0000\u00c3\u00c4\u0001\u0000\u0000\u0000\u00c3\u00c1\u0001\u0000" - + "\u0000\u0000\u00c4\u00c6\u0001\u0000\u0000\u0000\u00c5\u00c3\u0001\u0000" - + "\u0000\u0000\u00c6\u00c7\u0005*\u0000\u0000\u00c7\u00c9\u0005/\u0000\u0000" - + "\u00c8\u00b3\u0001\u0000\u0000\u0000\u00c8\u00bd\u0001\u0000\u0000\u0000" - + 
"\u00c9\u00ca\u0001\u0000\u0000\u0000\u00ca\u00cb\u0006\u0001\u0000\u0000" - + "\u00cb\u0005\u0001\u0000\u0000\u0000\u00cc\u00cd\u0005{\u0000\u0000\u00cd" - + "\u0007\u0001\u0000\u0000\u0000\u00ce\u00cf\u0005}\u0000\u0000\u00cf\t" - + "\u0001\u0000\u0000\u0000\u00d0\u00d1\u0005[\u0000\u0000\u00d1\u000b\u0001" - + "\u0000\u0000\u0000\u00d2\u00d3\u0005]\u0000\u0000\u00d3\r\u0001\u0000" - + "\u0000\u0000\u00d4\u00d5\u0005(\u0000\u0000\u00d5\u000f\u0001\u0000\u0000" - + "\u0000\u00d6\u00d7\u0005)\u0000\u0000\u00d7\u0011\u0001\u0000\u0000\u0000" - + "\u00d8\u00d9\u0005.\u0000\u0000\u00d9\u00da\u0001\u0000\u0000\u0000\u00da" - + "\u00db\u0006\b\u0001\u0000\u00db\u0013\u0001\u0000\u0000\u0000\u00dc\u00dd" - + "\u0005?\u0000\u0000\u00dd\u00de\u0005.\u0000\u0000\u00de\u00df\u0001\u0000" - + "\u0000\u0000\u00df\u00e0\u0006\t\u0001\u0000\u00e0\u0015\u0001\u0000\u0000" - + "\u0000\u00e1\u00e2\u0005,\u0000\u0000\u00e2\u0017\u0001\u0000\u0000\u0000" - + "\u00e3\u00e4\u0005;\u0000\u0000\u00e4\u0019\u0001\u0000\u0000\u0000\u00e5" - + "\u00e6\u0005i\u0000\u0000\u00e6\u00e7\u0005f\u0000\u0000\u00e7\u001b\u0001" - + "\u0000\u0000\u0000\u00e8\u00e9\u0005i\u0000\u0000\u00e9\u00ea\u0005n\u0000" - + "\u0000\u00ea\u001d\u0001\u0000\u0000\u0000\u00eb\u00ec\u0005e\u0000\u0000" - + "\u00ec\u00ed\u0005l\u0000\u0000\u00ed\u00ee\u0005s\u0000\u0000\u00ee\u00ef" - + "\u0005e\u0000\u0000\u00ef\u001f\u0001\u0000\u0000\u0000\u00f0\u00f1\u0005" - + "w\u0000\u0000\u00f1\u00f2\u0005h\u0000\u0000\u00f2\u00f3\u0005i\u0000" - + "\u0000\u00f3\u00f4\u0005l\u0000\u0000\u00f4\u00f5\u0005e\u0000\u0000\u00f5" - + "!\u0001\u0000\u0000\u0000\u00f6\u00f7\u0005d\u0000\u0000\u00f7\u00f8\u0005" - + "o\u0000\u0000\u00f8#\u0001\u0000\u0000\u0000\u00f9\u00fa\u0005f\u0000" - + "\u0000\u00fa\u00fb\u0005o\u0000\u0000\u00fb\u00fc\u0005r\u0000\u0000\u00fc" - + "%\u0001\u0000\u0000\u0000\u00fd\u00fe\u0005c\u0000\u0000\u00fe\u00ff\u0005" - + "o\u0000\u0000\u00ff\u0100\u0005n\u0000\u0000\u0100\u0101\u0005t\u0000" - + "\u0000\u0101\u0102\u0005i\u0000\u0000\u0102\u0103\u0005n\u0000\u0000\u0103" - + "\u0104\u0005u\u0000\u0000\u0104\u0105\u0005e\u0000\u0000\u0105\'\u0001" - + "\u0000\u0000\u0000\u0106\u0107\u0005b\u0000\u0000\u0107\u0108\u0005r\u0000" - + "\u0000\u0108\u0109\u0005e\u0000\u0000\u0109\u010a\u0005a\u0000\u0000\u010a" - + "\u010b\u0005k\u0000\u0000\u010b)\u0001\u0000\u0000\u0000\u010c\u010d\u0005" - + "r\u0000\u0000\u010d\u010e\u0005e\u0000\u0000\u010e\u010f\u0005t\u0000" - + "\u0000\u010f\u0110\u0005u\u0000\u0000\u0110\u0111\u0005r\u0000\u0000\u0111" - + "\u0112\u0005n\u0000\u0000\u0112+\u0001\u0000\u0000\u0000\u0113\u0114\u0005" - + "n\u0000\u0000\u0114\u0115\u0005e\u0000\u0000\u0115\u0116\u0005w\u0000" - + "\u0000\u0116-\u0001\u0000\u0000\u0000\u0117\u0118\u0005t\u0000\u0000\u0118" - + "\u0119\u0005r\u0000\u0000\u0119\u011a\u0005y\u0000\u0000\u011a/\u0001" - + "\u0000\u0000\u0000\u011b\u011c\u0005c\u0000\u0000\u011c\u011d\u0005a\u0000" - + "\u0000\u011d\u011e\u0005t\u0000\u0000\u011e\u011f\u0005c\u0000\u0000\u011f" - + "\u0120\u0005h\u0000\u0000\u01201\u0001\u0000\u0000\u0000\u0121\u0122\u0005" - + "t\u0000\u0000\u0122\u0123\u0005h\u0000\u0000\u0123\u0124\u0005r\u0000" - + "\u0000\u0124\u0125\u0005o\u0000\u0000\u0125\u0126\u0005w\u0000\u0000\u0126" - + "3\u0001\u0000\u0000\u0000\u0127\u0128\u0005t\u0000\u0000\u0128\u0129\u0005" - + "h\u0000\u0000\u0129\u012a\u0005i\u0000\u0000\u012a\u012b\u0005s\u0000" - + "\u0000\u012b5\u0001\u0000\u0000\u0000\u012c\u012d\u0005i\u0000\u0000\u012d" - + 
"\u012e\u0005n\u0000\u0000\u012e\u012f\u0005s\u0000\u0000\u012f\u0130\u0005" - + "t\u0000\u0000\u0130\u0131\u0005a\u0000\u0000\u0131\u0132\u0005n\u0000" - + "\u0000\u0132\u0133\u0005c\u0000\u0000\u0133\u0134\u0005e\u0000\u0000\u0134" - + "\u0135\u0005o\u0000\u0000\u0135\u0136\u0005f\u0000\u0000\u01367\u0001" - + "\u0000\u0000\u0000\u0137\u0138\u0005!\u0000\u0000\u01389\u0001\u0000\u0000" - + "\u0000\u0139\u013a\u0005~\u0000\u0000\u013a;\u0001\u0000\u0000\u0000\u013b" - + "\u013c\u0005*\u0000\u0000\u013c=\u0001\u0000\u0000\u0000\u013d\u013e\u0005" - + "/\u0000\u0000\u013e\u013f\u0004\u001e\u0000\u0000\u013f?\u0001\u0000\u0000" - + "\u0000\u0140\u0141\u0005%\u0000\u0000\u0141A\u0001\u0000\u0000\u0000\u0142" - + "\u0143\u0005+\u0000\u0000\u0143C\u0001\u0000\u0000\u0000\u0144\u0145\u0005" - + "-\u0000\u0000\u0145E\u0001\u0000\u0000\u0000\u0146\u0147\u0005<\u0000" - + "\u0000\u0147\u0148\u0005<\u0000\u0000\u0148G\u0001\u0000\u0000\u0000\u0149" - + "\u014a\u0005>\u0000\u0000\u014a\u014b\u0005>\u0000\u0000\u014bI\u0001" - + "\u0000\u0000\u0000\u014c\u014d\u0005>\u0000\u0000\u014d\u014e\u0005>\u0000" - + "\u0000\u014e\u014f\u0005>\u0000\u0000\u014fK\u0001\u0000\u0000\u0000\u0150" - + "\u0151\u0005<\u0000\u0000\u0151M\u0001\u0000\u0000\u0000\u0152\u0153\u0005" - + "<\u0000\u0000\u0153\u0154\u0005=\u0000\u0000\u0154O\u0001\u0000\u0000" - + "\u0000\u0155\u0156\u0005>\u0000\u0000\u0156Q\u0001\u0000\u0000\u0000\u0157" - + "\u0158\u0005>\u0000\u0000\u0158\u0159\u0005=\u0000\u0000\u0159S\u0001" - + "\u0000\u0000\u0000\u015a\u015b\u0005=\u0000\u0000\u015b\u015c\u0005=\u0000" - + "\u0000\u015cU\u0001\u0000\u0000\u0000\u015d\u015e\u0005=\u0000\u0000\u015e" - + "\u015f\u0005=\u0000\u0000\u015f\u0160\u0005=\u0000\u0000\u0160W\u0001" - + "\u0000\u0000\u0000\u0161\u0162\u0005!\u0000\u0000\u0162\u0163\u0005=\u0000" - + "\u0000\u0163Y\u0001\u0000\u0000\u0000\u0164\u0165\u0005!\u0000\u0000\u0165" - + "\u0166\u0005=\u0000\u0000\u0166\u0167\u0005=\u0000\u0000\u0167[\u0001" - + "\u0000\u0000\u0000\u0168\u0169\u0005&\u0000\u0000\u0169]\u0001\u0000\u0000" - + "\u0000\u016a\u016b\u0005^\u0000\u0000\u016b_\u0001\u0000\u0000\u0000\u016c" - + "\u016d\u0005|\u0000\u0000\u016da\u0001\u0000\u0000\u0000\u016e\u016f\u0005" - + "&\u0000\u0000\u016f\u0170\u0005&\u0000\u0000\u0170c\u0001\u0000\u0000" - + "\u0000\u0171\u0172\u0005|\u0000\u0000\u0172\u0173\u0005|\u0000\u0000\u0173" - + "e\u0001\u0000\u0000\u0000\u0174\u0175\u0005?\u0000\u0000\u0175g\u0001" - + "\u0000\u0000\u0000\u0176\u0177\u0005:\u0000\u0000\u0177i\u0001\u0000\u0000" - + "\u0000\u0178\u0179\u0005?\u0000\u0000\u0179\u017a\u0005:\u0000\u0000\u017a" - + "k\u0001\u0000\u0000\u0000\u017b\u017c\u0005:\u0000\u0000\u017c\u017d\u0005" - + ":\u0000\u0000\u017dm\u0001\u0000\u0000\u0000\u017e\u017f\u0005-\u0000" - + "\u0000\u017f\u0180\u0005>\u0000\u0000\u0180o\u0001\u0000\u0000\u0000\u0181" - + "\u0182\u0005=\u0000\u0000\u0182\u0183\u0005~\u0000\u0000\u0183q\u0001" - + "\u0000\u0000\u0000\u0184\u0185\u0005=\u0000\u0000\u0185\u0186\u0005=\u0000" - + "\u0000\u0186\u0187\u0005~\u0000\u0000\u0187s\u0001\u0000\u0000\u0000\u0188" - + "\u0189\u0005+\u0000\u0000\u0189\u018a\u0005+\u0000\u0000\u018au\u0001" - + "\u0000\u0000\u0000\u018b\u018c\u0005-\u0000\u0000\u018c\u018d\u0005-\u0000" - + "\u0000\u018dw\u0001\u0000\u0000\u0000\u018e\u018f\u0005=\u0000\u0000\u018f" - + "y\u0001\u0000\u0000\u0000\u0190\u0191\u0005+\u0000\u0000\u0191\u0192\u0005" - + "=\u0000\u0000\u0192{\u0001\u0000\u0000\u0000\u0193\u0194\u0005-\u0000" - + 
"\u0000\u0194\u0195\u0005=\u0000\u0000\u0195}\u0001\u0000\u0000\u0000\u0196" - + "\u0197\u0005*\u0000\u0000\u0197\u0198\u0005=\u0000\u0000\u0198\u007f\u0001" - + "\u0000\u0000\u0000\u0199\u019a\u0005/\u0000\u0000\u019a\u019b\u0005=\u0000" - + "\u0000\u019b\u0081\u0001\u0000\u0000\u0000\u019c\u019d\u0005%\u0000\u0000" - + "\u019d\u019e\u0005=\u0000\u0000\u019e\u0083\u0001\u0000\u0000\u0000\u019f" - + "\u01a0\u0005&\u0000\u0000\u01a0\u01a1\u0005=\u0000\u0000\u01a1\u0085\u0001" - + "\u0000\u0000\u0000\u01a2\u01a3\u0005^\u0000\u0000\u01a3\u01a4\u0005=\u0000" - + "\u0000\u01a4\u0087\u0001\u0000\u0000\u0000\u01a5\u01a6\u0005|\u0000\u0000" - + "\u01a6\u01a7\u0005=\u0000\u0000\u01a7\u0089\u0001\u0000\u0000\u0000\u01a8" - + "\u01a9\u0005<\u0000\u0000\u01a9\u01aa\u0005<\u0000\u0000\u01aa\u01ab\u0005" - + "=\u0000\u0000\u01ab\u008b\u0001\u0000\u0000\u0000\u01ac\u01ad\u0005>\u0000" - + "\u0000\u01ad\u01ae\u0005>\u0000\u0000\u01ae\u01af\u0005=\u0000\u0000\u01af" - + "\u008d\u0001\u0000\u0000\u0000\u01b0\u01b1\u0005>\u0000\u0000\u01b1\u01b2" - + "\u0005>\u0000\u0000\u01b2\u01b3\u0005>\u0000\u0000\u01b3\u01b4\u0005=" - + "\u0000\u0000\u01b4\u008f\u0001\u0000\u0000\u0000\u01b5\u01b7\u00050\u0000" - + "\u0000\u01b6\u01b8\u0007\u0002\u0000\u0000\u01b7\u01b6\u0001\u0000\u0000" - + "\u0000\u01b8\u01b9\u0001\u0000\u0000\u0000\u01b9\u01b7\u0001\u0000\u0000" - + "\u0000\u01b9\u01ba\u0001\u0000\u0000\u0000\u01ba\u01bc\u0001\u0000\u0000" - + "\u0000\u01bb\u01bd\u0007\u0003\u0000\u0000\u01bc\u01bb\u0001\u0000\u0000" - + "\u0000\u01bc\u01bd\u0001\u0000\u0000\u0000\u01bd\u0091\u0001\u0000\u0000" - + "\u0000\u01be\u01bf\u00050\u0000\u0000\u01bf\u01c1\u0007\u0004\u0000\u0000" - + "\u01c0\u01c2\u0007\u0005\u0000\u0000\u01c1\u01c0\u0001\u0000\u0000\u0000" - + "\u01c2\u01c3\u0001\u0000\u0000\u0000\u01c3\u01c1\u0001\u0000\u0000\u0000" - + "\u01c3\u01c4\u0001\u0000\u0000\u0000\u01c4\u01c6\u0001\u0000\u0000\u0000" - + "\u01c5\u01c7\u0007\u0003\u0000\u0000\u01c6\u01c5\u0001\u0000\u0000\u0000" - + "\u01c6\u01c7\u0001\u0000\u0000\u0000\u01c7\u0093\u0001\u0000\u0000\u0000" - + "\u01c8\u01d1\u00050\u0000\u0000\u01c9\u01cd\u0007\u0006\u0000\u0000\u01ca" - + "\u01cc\u0007\u0007\u0000\u0000\u01cb\u01ca\u0001\u0000\u0000\u0000\u01cc" - + "\u01cf\u0001\u0000\u0000\u0000\u01cd\u01cb\u0001\u0000\u0000\u0000\u01cd" - + "\u01ce\u0001\u0000\u0000\u0000\u01ce\u01d1\u0001\u0000\u0000\u0000\u01cf" - + "\u01cd\u0001\u0000\u0000\u0000\u01d0\u01c8\u0001\u0000\u0000\u0000\u01d0" - + "\u01c9\u0001\u0000\u0000\u0000\u01d1\u01d3\u0001\u0000\u0000\u0000\u01d2" - + "\u01d4\u0007\b\u0000\u0000\u01d3\u01d2\u0001\u0000\u0000\u0000\u01d3\u01d4" - + "\u0001\u0000\u0000\u0000\u01d4\u0095\u0001\u0000\u0000\u0000\u01d5\u01de" - + "\u00050\u0000\u0000\u01d6\u01da\u0007\u0006\u0000\u0000\u01d7\u01d9\u0007" - + "\u0007\u0000\u0000\u01d8\u01d7\u0001\u0000\u0000\u0000\u01d9\u01dc\u0001" - + "\u0000\u0000\u0000\u01da\u01d8\u0001\u0000\u0000\u0000\u01da\u01db\u0001" - + "\u0000\u0000\u0000\u01db\u01de\u0001\u0000\u0000\u0000\u01dc\u01da\u0001" - + "\u0000\u0000\u0000\u01dd\u01d5\u0001\u0000\u0000\u0000\u01dd\u01d6\u0001" - + "\u0000\u0000\u0000\u01de\u01e5\u0001\u0000\u0000\u0000\u01df\u01e1\u0003" - + "\u0012\b\u0000\u01e0\u01e2\u0007\u0007\u0000\u0000\u01e1\u01e0\u0001\u0000" - + "\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000\u0000\u01e3\u01e1\u0001\u0000" - + "\u0000\u0000\u01e3\u01e4\u0001\u0000\u0000\u0000\u01e4\u01e6\u0001\u0000" - + "\u0000\u0000\u01e5\u01df\u0001\u0000\u0000\u0000\u01e5\u01e6\u0001\u0000" - + 
"\u0000\u0000\u01e6\u01f0\u0001\u0000\u0000\u0000\u01e7\u01e9\u0007\t\u0000" - + "\u0000\u01e8\u01ea\u0007\n\u0000\u0000\u01e9\u01e8\u0001\u0000\u0000\u0000" - + "\u01e9\u01ea\u0001\u0000\u0000\u0000\u01ea\u01ec\u0001\u0000\u0000\u0000" - + "\u01eb\u01ed\u0007\u0007\u0000\u0000\u01ec\u01eb\u0001\u0000\u0000\u0000" - + "\u01ed\u01ee\u0001\u0000\u0000\u0000\u01ee\u01ec\u0001\u0000\u0000\u0000" - + "\u01ee\u01ef\u0001\u0000\u0000\u0000\u01ef\u01f1\u0001\u0000\u0000\u0000" - + "\u01f0\u01e7\u0001\u0000\u0000\u0000\u01f0\u01f1\u0001\u0000\u0000\u0000" - + "\u01f1\u01f3\u0001\u0000\u0000\u0000\u01f2\u01f4\u0007\u000b\u0000\u0000" - + "\u01f3\u01f2\u0001\u0000\u0000\u0000\u01f3\u01f4\u0001\u0000\u0000\u0000" - + "\u01f4\u0097\u0001\u0000\u0000\u0000\u01f5\u01fd\u0005\"\u0000\u0000\u01f6" - + "\u01f7\u0005\\\u0000\u0000\u01f7\u01fc\u0005\"\u0000\u0000\u01f8\u01f9" - + "\u0005\\\u0000\u0000\u01f9\u01fc\u0005\\\u0000\u0000\u01fa\u01fc\b\f\u0000" - + "\u0000\u01fb\u01f6\u0001\u0000\u0000\u0000\u01fb\u01f8\u0001\u0000\u0000" - + "\u0000\u01fb\u01fa\u0001\u0000\u0000\u0000\u01fc\u01ff\u0001\u0000\u0000" - + "\u0000\u01fd\u01fe\u0001\u0000\u0000\u0000\u01fd\u01fb\u0001\u0000\u0000" - + "\u0000\u01fe\u0200\u0001\u0000\u0000\u0000\u01ff\u01fd\u0001\u0000\u0000" - + "\u0000\u0200\u020e\u0005\"\u0000\u0000\u0201\u0209\u0005\'\u0000\u0000" - + "\u0202\u0203\u0005\\\u0000\u0000\u0203\u0208\u0005\'\u0000\u0000\u0204" - + "\u0205\u0005\\\u0000\u0000\u0205\u0208\u0005\\\u0000\u0000\u0206\u0208" - + "\b\r\u0000\u0000\u0207\u0202\u0001\u0000\u0000\u0000\u0207\u0204\u0001" - + "\u0000\u0000\u0000\u0207\u0206\u0001\u0000\u0000\u0000\u0208\u020b\u0001" - + "\u0000\u0000\u0000\u0209\u020a\u0001\u0000\u0000\u0000\u0209\u0207\u0001" - + "\u0000\u0000\u0000\u020a\u020c\u0001\u0000\u0000\u0000\u020b\u0209\u0001" - + "\u0000\u0000\u0000\u020c\u020e\u0005\'\u0000\u0000\u020d\u01f5\u0001\u0000" - + "\u0000\u0000\u020d\u0201\u0001\u0000\u0000\u0000\u020e\u0099\u0001\u0000" - + "\u0000\u0000\u020f\u0213\u0005/\u0000\u0000\u0210\u0211\u0005\\\u0000" - + "\u0000\u0211\u0214\b\u000e\u0000\u0000\u0212\u0214\b\u000f\u0000\u0000" - + "\u0213\u0210\u0001\u0000\u0000\u0000\u0213\u0212\u0001\u0000\u0000\u0000" - + "\u0214\u0215\u0001\u0000\u0000\u0000\u0215\u0216\u0001\u0000\u0000\u0000" - + "\u0215\u0213\u0001\u0000\u0000\u0000\u0216\u0217\u0001\u0000\u0000\u0000" - + "\u0217\u021b\u0005/\u0000\u0000\u0218\u021a\u0007\u0010\u0000\u0000\u0219" - + "\u0218\u0001\u0000\u0000\u0000\u021a\u021d\u0001\u0000\u0000\u0000\u021b" - + "\u0219\u0001\u0000\u0000\u0000\u021b\u021c\u0001\u0000\u0000\u0000\u021c" - + "\u021e\u0001\u0000\u0000\u0000\u021d\u021b\u0001\u0000\u0000\u0000\u021e" - + "\u021f\u0004L\u0001\u0000\u021f\u009b\u0001\u0000\u0000\u0000\u0220\u0221" - + "\u0005t\u0000\u0000\u0221\u0222\u0005r\u0000\u0000\u0222\u0223\u0005u" - + "\u0000\u0000\u0223\u0224\u0005e\u0000\u0000\u0224\u009d\u0001\u0000\u0000" - + "\u0000\u0225\u0226\u0005f\u0000\u0000\u0226\u0227\u0005a\u0000\u0000\u0227" - + "\u0228\u0005l\u0000\u0000\u0228\u0229\u0005s\u0000\u0000\u0229\u022a\u0005" - + "e\u0000\u0000\u022a\u009f\u0001\u0000\u0000\u0000\u022b\u022c\u0005n\u0000" - + "\u0000\u022c\u022d\u0005u\u0000\u0000\u022d\u022e\u0005l\u0000\u0000\u022e" - + "\u022f\u0005l\u0000\u0000\u022f\u00a1\u0001\u0000\u0000\u0000\u0230\u0231" - + "\u0005b\u0000\u0000\u0231\u0232\u0005o\u0000\u0000\u0232\u0233\u0005o" - + "\u0000\u0000\u0233\u0234\u0005l\u0000\u0000\u0234\u0235\u0005e\u0000\u0000" - + 
"\u0235\u0236\u0005a\u0000\u0000\u0236\u0257\u0005n\u0000\u0000\u0237\u0238" - + "\u0005b\u0000\u0000\u0238\u0239\u0005y\u0000\u0000\u0239\u023a\u0005t" - + "\u0000\u0000\u023a\u0257\u0005e\u0000\u0000\u023b\u023c\u0005s\u0000\u0000" - + "\u023c\u023d\u0005h\u0000\u0000\u023d\u023e\u0005o\u0000\u0000\u023e\u023f" - + "\u0005r\u0000\u0000\u023f\u0257\u0005t\u0000\u0000\u0240\u0241\u0005c" - + "\u0000\u0000\u0241\u0242\u0005h\u0000\u0000\u0242\u0243\u0005a\u0000\u0000" - + "\u0243\u0257\u0005r\u0000\u0000\u0244\u0245\u0005i\u0000\u0000\u0245\u0246" - + "\u0005n\u0000\u0000\u0246\u0257\u0005t\u0000\u0000\u0247\u0248\u0005l" - + "\u0000\u0000\u0248\u0249\u0005o\u0000\u0000\u0249\u024a\u0005n\u0000\u0000" - + "\u024a\u0257\u0005g\u0000\u0000\u024b\u024c\u0005f\u0000\u0000\u024c\u024d" - + "\u0005l\u0000\u0000\u024d\u024e\u0005o\u0000\u0000\u024e\u024f\u0005a" - + "\u0000\u0000\u024f\u0257\u0005t\u0000\u0000\u0250\u0251\u0005d\u0000\u0000" - + "\u0251\u0252\u0005o\u0000\u0000\u0252\u0253\u0005u\u0000\u0000\u0253\u0254" - + "\u0005b\u0000\u0000\u0254\u0255\u0005l\u0000\u0000\u0255\u0257\u0005e" - + "\u0000\u0000\u0256\u0230\u0001\u0000\u0000\u0000\u0256\u0237\u0001\u0000" - + "\u0000\u0000\u0256\u023b\u0001\u0000\u0000\u0000\u0256\u0240\u0001\u0000" - + "\u0000\u0000\u0256\u0244\u0001\u0000\u0000\u0000\u0256\u0247\u0001\u0000" - + "\u0000\u0000\u0256\u024b\u0001\u0000\u0000\u0000\u0256\u0250\u0001\u0000" - + "\u0000\u0000\u0257\u00a3\u0001\u0000\u0000\u0000\u0258\u0259\u0005d\u0000" - + "\u0000\u0259\u025a\u0005e\u0000\u0000\u025a\u025b\u0005f\u0000\u0000\u025b" - + "\u00a5\u0001\u0000\u0000\u0000\u025c\u0260\u0007\u0011\u0000\u0000\u025d" - + "\u025f\u0007\u0012\u0000\u0000\u025e\u025d\u0001\u0000\u0000\u0000\u025f" - + "\u0262\u0001\u0000\u0000\u0000\u0260\u025e\u0001\u0000\u0000\u0000\u0260" - + "\u0261\u0001\u0000\u0000\u0000\u0261\u00a7\u0001\u0000\u0000\u0000\u0262" - + "\u0260\u0001\u0000\u0000\u0000\u0263\u026c\u00050\u0000\u0000\u0264\u0268" - + "\u0007\u0006\u0000\u0000\u0265\u0267\u0007\u0007\u0000\u0000\u0266\u0265" - + "\u0001\u0000\u0000\u0000\u0267\u026a\u0001\u0000\u0000\u0000\u0268\u0266" - + "\u0001\u0000\u0000\u0000\u0268\u0269\u0001\u0000\u0000\u0000\u0269\u026c" - + "\u0001\u0000\u0000\u0000\u026a\u0268\u0001\u0000\u0000\u0000\u026b\u0263" - + "\u0001\u0000\u0000\u0000\u026b\u0264\u0001\u0000\u0000\u0000\u026c\u026d" - + "\u0001\u0000\u0000\u0000\u026d\u026e\u0006S\u0002\u0000\u026e\u00a9\u0001" - + "\u0000\u0000\u0000\u026f\u0273\u0007\u0011\u0000\u0000\u0270\u0272\u0007" - + "\u0012\u0000\u0000\u0271\u0270\u0001\u0000\u0000\u0000\u0272\u0275\u0001" - + "\u0000\u0000\u0000\u0273\u0271\u0001\u0000\u0000\u0000\u0273\u0274\u0001" - + "\u0000\u0000\u0000\u0274\u0276\u0001\u0000\u0000\u0000\u0275\u0273\u0001" - + "\u0000\u0000\u0000\u0276\u0277\u0006T\u0002\u0000\u0277\u00ab\u0001\u0000" - + "\u0000\u0000\"\u0000\u0001\u00af\u00b9\u00c3\u00c8\u01b9\u01bc\u01c3\u01c6" - + "\u01cd\u01d0\u01d3\u01da\u01dd\u01e3\u01e5\u01e9\u01ee\u01f0\u01f3\u01fb" - + "\u01fd\u0207\u0209\u020d\u0213\u0215\u021b\u0256\u0260\u0268\u026b\u0273" - + "\u0003\u0006\u0000\u0000\u0002\u0001\u0000\u0002\u0000\u0000"; + + "P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0001P\u0003" + + "P\u0256\bP\u0001Q\u0001Q\u0001Q\u0001Q\u0001R\u0001R\u0005R\u025e\bR\n" + + "R\fR\u0261\tR\u0001S\u0001S\u0001S\u0005S\u0266\bS\nS\fS\u0269\tS\u0003" + + "S\u026b\bS\u0001S\u0001S\u0001T\u0001T\u0005T\u0271\bT\nT\fT\u0274\tT" + + "\u0001T\u0001T\u0004\u00c2\u01fc\u0208\u0214\u0000U\u0002\u0001\u0004" + + 
"\u0002\u0006\u0003\b\u0004\n\u0005\f\u0006\u000e\u0007\u0010\b\u0012\t" + + "\u0014\n\u0016\u000b\u0018\f\u001a\r\u001c\u000e\u001e\u000f \u0010\"" + + "\u0011$\u0012&\u0013(\u0014*\u0015,\u0016.\u00170\u00182\u00194\u001a" + + "6\u001b8\u001c:\u001d<\u001e>\u001f@ B!D\"F#H$J%L&N\'P(R)T*V+X,Z-\\.^" + + "/`0b1d2f3h4j5l6n7p8r9t:v;x~?\u0080@\u0082A\u0084B\u0086C\u0088D\u008a" + + "E\u008cF\u008eG\u0090H\u0092I\u0094J\u0096K\u0098L\u009aM\u009cN\u009e" + + "O\u00a0P\u00a2Q\u00a4R\u00a6S\u00a8T\u00aaU\u0002\u0000\u0001\u0013\u0003" + + "\u0000\t\n\r\r \u0002\u0000\n\n\r\r\u0001\u000007\u0002\u0000LLll\u0002" + + "\u0000XXxx\u0003\u000009AFaf\u0001\u000019\u0001\u000009\u0006\u0000D" + + "DFFLLddffll\u0002\u0000EEee\u0002\u0000++--\u0004\u0000DDFFddff\u0002" + + "\u0000\"\"\\\\\u0002\u0000\'\'\\\\\u0001\u0000\n\n\u0002\u0000\n\n//\u0007" + + "\u0000UUcciilmssuuxx\u0003\u0000AZ__az\u0004\u000009AZ__az\u029d\u0000" + + "\u0002\u0001\u0000\u0000\u0000\u0000\u0004\u0001\u0000\u0000\u0000\u0000" + + "\u0006\u0001\u0000\u0000\u0000\u0000\b\u0001\u0000\u0000\u0000\u0000\n" + + "\u0001\u0000\u0000\u0000\u0000\f\u0001\u0000\u0000\u0000\u0000\u000e\u0001" + + "\u0000\u0000\u0000\u0000\u0010\u0001\u0000\u0000\u0000\u0000\u0012\u0001" + + "\u0000\u0000\u0000\u0000\u0014\u0001\u0000\u0000\u0000\u0000\u0016\u0001" + + "\u0000\u0000\u0000\u0000\u0018\u0001\u0000\u0000\u0000\u0000\u001a\u0001" + + "\u0000\u0000\u0000\u0000\u001c\u0001\u0000\u0000\u0000\u0000\u001e\u0001" + + "\u0000\u0000\u0000\u0000 \u0001\u0000\u0000\u0000\u0000\"\u0001\u0000" + + "\u0000\u0000\u0000$\u0001\u0000\u0000\u0000\u0000&\u0001\u0000\u0000\u0000" + + "\u0000(\u0001\u0000\u0000\u0000\u0000*\u0001\u0000\u0000\u0000\u0000," + + "\u0001\u0000\u0000\u0000\u0000.\u0001\u0000\u0000\u0000\u00000\u0001\u0000" + + "\u0000\u0000\u00002\u0001\u0000\u0000\u0000\u00004\u0001\u0000\u0000\u0000" + + "\u00006\u0001\u0000\u0000\u0000\u00008\u0001\u0000\u0000\u0000\u0000:" + + "\u0001\u0000\u0000\u0000\u0000<\u0001\u0000\u0000\u0000\u0000>\u0001\u0000" + + "\u0000\u0000\u0000@\u0001\u0000\u0000\u0000\u0000B\u0001\u0000\u0000\u0000" + + "\u0000D\u0001\u0000\u0000\u0000\u0000F\u0001\u0000\u0000\u0000\u0000H" + + "\u0001\u0000\u0000\u0000\u0000J\u0001\u0000\u0000\u0000\u0000L\u0001\u0000" + + "\u0000\u0000\u0000N\u0001\u0000\u0000\u0000\u0000P\u0001\u0000\u0000\u0000" + + "\u0000R\u0001\u0000\u0000\u0000\u0000T\u0001\u0000\u0000\u0000\u0000V" + + "\u0001\u0000\u0000\u0000\u0000X\u0001\u0000\u0000\u0000\u0000Z\u0001\u0000" + + "\u0000\u0000\u0000\\\u0001\u0000\u0000\u0000\u0000^\u0001\u0000\u0000" + + "\u0000\u0000`\u0001\u0000\u0000\u0000\u0000b\u0001\u0000\u0000\u0000\u0000" + + "d\u0001\u0000\u0000\u0000\u0000f\u0001\u0000\u0000\u0000\u0000h\u0001" + + "\u0000\u0000\u0000\u0000j\u0001\u0000\u0000\u0000\u0000l\u0001\u0000\u0000" + + "\u0000\u0000n\u0001\u0000\u0000\u0000\u0000p\u0001\u0000\u0000\u0000\u0000" + + "r\u0001\u0000\u0000\u0000\u0000t\u0001\u0000\u0000\u0000\u0000v\u0001" + + "\u0000\u0000\u0000\u0000x\u0001\u0000\u0000\u0000\u0000z\u0001\u0000\u0000" + + "\u0000\u0000|\u0001\u0000\u0000\u0000\u0000~\u0001\u0000\u0000\u0000\u0000" + + "\u0080\u0001\u0000\u0000\u0000\u0000\u0082\u0001\u0000\u0000\u0000\u0000" + + "\u0084\u0001\u0000\u0000\u0000\u0000\u0086\u0001\u0000\u0000\u0000\u0000" + + "\u0088\u0001\u0000\u0000\u0000\u0000\u008a\u0001\u0000\u0000\u0000\u0000" + + "\u008c\u0001\u0000\u0000\u0000\u0000\u008e\u0001\u0000\u0000\u0000\u0000" + + "\u0090\u0001\u0000\u0000\u0000\u0000\u0092\u0001\u0000\u0000\u0000\u0000" + + 
"\u0094\u0001\u0000\u0000\u0000\u0000\u0096\u0001\u0000\u0000\u0000\u0000" + + "\u0098\u0001\u0000\u0000\u0000\u0000\u009a\u0001\u0000\u0000\u0000\u0000" + + "\u009c\u0001\u0000\u0000\u0000\u0000\u009e\u0001\u0000\u0000\u0000\u0000" + + "\u00a0\u0001\u0000\u0000\u0000\u0000\u00a2\u0001\u0000\u0000\u0000\u0000" + + "\u00a4\u0001\u0000\u0000\u0000\u0000\u00a6\u0001\u0000\u0000\u0000\u0001" + + "\u00a8\u0001\u0000\u0000\u0000\u0001\u00aa\u0001\u0000\u0000\u0000\u0002" + + "\u00ad\u0001\u0000\u0000\u0000\u0004\u00c7\u0001\u0000\u0000\u0000\u0006" + + "\u00cb\u0001\u0000\u0000\u0000\b\u00cd\u0001\u0000\u0000\u0000\n\u00cf" + + "\u0001\u0000\u0000\u0000\f\u00d1\u0001\u0000\u0000\u0000\u000e\u00d3\u0001" + + "\u0000\u0000\u0000\u0010\u00d5\u0001\u0000\u0000\u0000\u0012\u00d7\u0001" + + "\u0000\u0000\u0000\u0014\u00db\u0001\u0000\u0000\u0000\u0016\u00e0\u0001" + + "\u0000\u0000\u0000\u0018\u00e2\u0001\u0000\u0000\u0000\u001a\u00e4\u0001" + + "\u0000\u0000\u0000\u001c\u00e7\u0001\u0000\u0000\u0000\u001e\u00ea\u0001" + + "\u0000\u0000\u0000 \u00ef\u0001\u0000\u0000\u0000\"\u00f5\u0001\u0000" + + "\u0000\u0000$\u00f8\u0001\u0000\u0000\u0000&\u00fc\u0001\u0000\u0000\u0000" + + "(\u0105\u0001\u0000\u0000\u0000*\u010b\u0001\u0000\u0000\u0000,\u0112" + + "\u0001\u0000\u0000\u0000.\u0116\u0001\u0000\u0000\u00000\u011a\u0001\u0000" + + "\u0000\u00002\u0120\u0001\u0000\u0000\u00004\u0126\u0001\u0000\u0000\u0000" + + "6\u012b\u0001\u0000\u0000\u00008\u0136\u0001\u0000\u0000\u0000:\u0138" + + "\u0001\u0000\u0000\u0000<\u013a\u0001\u0000\u0000\u0000>\u013c\u0001\u0000" + + "\u0000\u0000@\u013f\u0001\u0000\u0000\u0000B\u0141\u0001\u0000\u0000\u0000" + + "D\u0143\u0001\u0000\u0000\u0000F\u0145\u0001\u0000\u0000\u0000H\u0148" + + "\u0001\u0000\u0000\u0000J\u014b\u0001\u0000\u0000\u0000L\u014f\u0001\u0000" + + "\u0000\u0000N\u0151\u0001\u0000\u0000\u0000P\u0154\u0001\u0000\u0000\u0000" + + "R\u0156\u0001\u0000\u0000\u0000T\u0159\u0001\u0000\u0000\u0000V\u015c" + + "\u0001\u0000\u0000\u0000X\u0160\u0001\u0000\u0000\u0000Z\u0163\u0001\u0000" + + "\u0000\u0000\\\u0167\u0001\u0000\u0000\u0000^\u0169\u0001\u0000\u0000" + + "\u0000`\u016b\u0001\u0000\u0000\u0000b\u016d\u0001\u0000\u0000\u0000d" + + "\u0170\u0001\u0000\u0000\u0000f\u0173\u0001\u0000\u0000\u0000h\u0175\u0001" + + "\u0000\u0000\u0000j\u0177\u0001\u0000\u0000\u0000l\u017a\u0001\u0000\u0000" + + "\u0000n\u017d\u0001\u0000\u0000\u0000p\u0180\u0001\u0000\u0000\u0000r" + + "\u0183\u0001\u0000\u0000\u0000t\u0187\u0001\u0000\u0000\u0000v\u018a\u0001" + + "\u0000\u0000\u0000x\u018d\u0001\u0000\u0000\u0000z\u018f\u0001\u0000\u0000" + + "\u0000|\u0192\u0001\u0000\u0000\u0000~\u0195\u0001\u0000\u0000\u0000\u0080" + + "\u0198\u0001\u0000\u0000\u0000\u0082\u019b\u0001\u0000\u0000\u0000\u0084" + + "\u019e\u0001\u0000\u0000\u0000\u0086\u01a1\u0001\u0000\u0000\u0000\u0088" + + "\u01a4\u0001\u0000\u0000\u0000\u008a\u01a7\u0001\u0000\u0000\u0000\u008c" + + "\u01ab\u0001\u0000\u0000\u0000\u008e\u01af\u0001\u0000\u0000\u0000\u0090" + + "\u01b4\u0001\u0000\u0000\u0000\u0092\u01bd\u0001\u0000\u0000\u0000\u0094" + + "\u01cf\u0001\u0000\u0000\u0000\u0096\u01dc\u0001\u0000\u0000\u0000\u0098" + + "\u020c\u0001\u0000\u0000\u0000\u009a\u020e\u0001\u0000\u0000\u0000\u009c" + + "\u021f\u0001\u0000\u0000\u0000\u009e\u0224\u0001\u0000\u0000\u0000\u00a0" + + "\u022a\u0001\u0000\u0000\u0000\u00a2\u0255\u0001\u0000\u0000\u0000\u00a4" + + "\u0257\u0001\u0000\u0000\u0000\u00a6\u025b\u0001\u0000\u0000\u0000\u00a8" + + "\u026a\u0001\u0000\u0000\u0000\u00aa\u026e\u0001\u0000\u0000\u0000\u00ac" + 
+ "\u00ae\u0007\u0000\u0000\u0000\u00ad\u00ac\u0001\u0000\u0000\u0000\u00ae" + + "\u00af\u0001\u0000\u0000\u0000\u00af\u00ad\u0001\u0000\u0000\u0000\u00af" + + "\u00b0\u0001\u0000\u0000\u0000\u00b0\u00b1\u0001\u0000\u0000\u0000\u00b1" + + "\u00b2\u0006\u0000\u0000\u0000\u00b2\u0003\u0001\u0000\u0000\u0000\u00b3" + + "\u00b4\u0005/\u0000\u0000\u00b4\u00b5\u0005/\u0000\u0000\u00b5\u00b9\u0001" + + "\u0000\u0000\u0000\u00b6\u00b8\b\u0001\u0000\u0000\u00b7\u00b6\u0001\u0000" + + "\u0000\u0000\u00b8\u00bb\u0001\u0000\u0000\u0000\u00b9\u00b7\u0001\u0000" + + "\u0000\u0000\u00b9\u00ba\u0001\u0000\u0000\u0000\u00ba\u00c8\u0001\u0000" + + "\u0000\u0000\u00bb\u00b9\u0001\u0000\u0000\u0000\u00bc\u00bd\u0005/\u0000" + + "\u0000\u00bd\u00be\u0005*\u0000\u0000\u00be\u00c2\u0001\u0000\u0000\u0000" + + "\u00bf\u00c1\t\u0000\u0000\u0000\u00c0\u00bf\u0001\u0000\u0000\u0000\u00c1" + + "\u00c4\u0001\u0000\u0000\u0000\u00c2\u00c3\u0001\u0000\u0000\u0000\u00c2" + + "\u00c0\u0001\u0000\u0000\u0000\u00c3\u00c5\u0001\u0000\u0000\u0000\u00c4" + + "\u00c2\u0001\u0000\u0000\u0000\u00c5\u00c6\u0005*\u0000\u0000\u00c6\u00c8" + + "\u0005/\u0000\u0000\u00c7\u00b3\u0001\u0000\u0000\u0000\u00c7\u00bc\u0001" + + "\u0000\u0000\u0000\u00c8\u00c9\u0001\u0000\u0000\u0000\u00c9\u00ca\u0006" + + "\u0001\u0000\u0000\u00ca\u0005\u0001\u0000\u0000\u0000\u00cb\u00cc\u0005" + + "{\u0000\u0000\u00cc\u0007\u0001\u0000\u0000\u0000\u00cd\u00ce\u0005}\u0000" + + "\u0000\u00ce\t\u0001\u0000\u0000\u0000\u00cf\u00d0\u0005[\u0000\u0000" + + "\u00d0\u000b\u0001\u0000\u0000\u0000\u00d1\u00d2\u0005]\u0000\u0000\u00d2" + + "\r\u0001\u0000\u0000\u0000\u00d3\u00d4\u0005(\u0000\u0000\u00d4\u000f" + + "\u0001\u0000\u0000\u0000\u00d5\u00d6\u0005)\u0000\u0000\u00d6\u0011\u0001" + + "\u0000\u0000\u0000\u00d7\u00d8\u0005.\u0000\u0000\u00d8\u00d9\u0001\u0000" + + "\u0000\u0000\u00d9\u00da\u0006\b\u0001\u0000\u00da\u0013\u0001\u0000\u0000" + + "\u0000\u00db\u00dc\u0005?\u0000\u0000\u00dc\u00dd\u0005.\u0000\u0000\u00dd" + + "\u00de\u0001\u0000\u0000\u0000\u00de\u00df\u0006\t\u0001\u0000\u00df\u0015" + + "\u0001\u0000\u0000\u0000\u00e0\u00e1\u0005,\u0000\u0000\u00e1\u0017\u0001" + + "\u0000\u0000\u0000\u00e2\u00e3\u0005;\u0000\u0000\u00e3\u0019\u0001\u0000" + + "\u0000\u0000\u00e4\u00e5\u0005i\u0000\u0000\u00e5\u00e6\u0005f\u0000\u0000" + + "\u00e6\u001b\u0001\u0000\u0000\u0000\u00e7\u00e8\u0005i\u0000\u0000\u00e8" + + "\u00e9\u0005n\u0000\u0000\u00e9\u001d\u0001\u0000\u0000\u0000\u00ea\u00eb" + + "\u0005e\u0000\u0000\u00eb\u00ec\u0005l\u0000\u0000\u00ec\u00ed\u0005s" + + "\u0000\u0000\u00ed\u00ee\u0005e\u0000\u0000\u00ee\u001f\u0001\u0000\u0000" + + "\u0000\u00ef\u00f0\u0005w\u0000\u0000\u00f0\u00f1\u0005h\u0000\u0000\u00f1" + + "\u00f2\u0005i\u0000\u0000\u00f2\u00f3\u0005l\u0000\u0000\u00f3\u00f4\u0005" + + "e\u0000\u0000\u00f4!\u0001\u0000\u0000\u0000\u00f5\u00f6\u0005d\u0000" + + "\u0000\u00f6\u00f7\u0005o\u0000\u0000\u00f7#\u0001\u0000\u0000\u0000\u00f8" + + "\u00f9\u0005f\u0000\u0000\u00f9\u00fa\u0005o\u0000\u0000\u00fa\u00fb\u0005" + + "r\u0000\u0000\u00fb%\u0001\u0000\u0000\u0000\u00fc\u00fd\u0005c\u0000" + + "\u0000\u00fd\u00fe\u0005o\u0000\u0000\u00fe\u00ff\u0005n\u0000\u0000\u00ff" + + "\u0100\u0005t\u0000\u0000\u0100\u0101\u0005i\u0000\u0000\u0101\u0102\u0005" + + "n\u0000\u0000\u0102\u0103\u0005u\u0000\u0000\u0103\u0104\u0005e\u0000" + + "\u0000\u0104\'\u0001\u0000\u0000\u0000\u0105\u0106\u0005b\u0000\u0000" + + "\u0106\u0107\u0005r\u0000\u0000\u0107\u0108\u0005e\u0000\u0000\u0108\u0109" + + 
"\u0005a\u0000\u0000\u0109\u010a\u0005k\u0000\u0000\u010a)\u0001\u0000" + + "\u0000\u0000\u010b\u010c\u0005r\u0000\u0000\u010c\u010d\u0005e\u0000\u0000" + + "\u010d\u010e\u0005t\u0000\u0000\u010e\u010f\u0005u\u0000\u0000\u010f\u0110" + + "\u0005r\u0000\u0000\u0110\u0111\u0005n\u0000\u0000\u0111+\u0001\u0000" + + "\u0000\u0000\u0112\u0113\u0005n\u0000\u0000\u0113\u0114\u0005e\u0000\u0000" + + "\u0114\u0115\u0005w\u0000\u0000\u0115-\u0001\u0000\u0000\u0000\u0116\u0117" + + "\u0005t\u0000\u0000\u0117\u0118\u0005r\u0000\u0000\u0118\u0119\u0005y" + + "\u0000\u0000\u0119/\u0001\u0000\u0000\u0000\u011a\u011b\u0005c\u0000\u0000" + + "\u011b\u011c\u0005a\u0000\u0000\u011c\u011d\u0005t\u0000\u0000\u011d\u011e" + + "\u0005c\u0000\u0000\u011e\u011f\u0005h\u0000\u0000\u011f1\u0001\u0000" + + "\u0000\u0000\u0120\u0121\u0005t\u0000\u0000\u0121\u0122\u0005h\u0000\u0000" + + "\u0122\u0123\u0005r\u0000\u0000\u0123\u0124\u0005o\u0000\u0000\u0124\u0125" + + "\u0005w\u0000\u0000\u01253\u0001\u0000\u0000\u0000\u0126\u0127\u0005t" + + "\u0000\u0000\u0127\u0128\u0005h\u0000\u0000\u0128\u0129\u0005i\u0000\u0000" + + "\u0129\u012a\u0005s\u0000\u0000\u012a5\u0001\u0000\u0000\u0000\u012b\u012c" + + "\u0005i\u0000\u0000\u012c\u012d\u0005n\u0000\u0000\u012d\u012e\u0005s" + + "\u0000\u0000\u012e\u012f\u0005t\u0000\u0000\u012f\u0130\u0005a\u0000\u0000" + + "\u0130\u0131\u0005n\u0000\u0000\u0131\u0132\u0005c\u0000\u0000\u0132\u0133" + + "\u0005e\u0000\u0000\u0133\u0134\u0005o\u0000\u0000\u0134\u0135\u0005f" + + "\u0000\u0000\u01357\u0001\u0000\u0000\u0000\u0136\u0137\u0005!\u0000\u0000" + + "\u01379\u0001\u0000\u0000\u0000\u0138\u0139\u0005~\u0000\u0000\u0139;" + + "\u0001\u0000\u0000\u0000\u013a\u013b\u0005*\u0000\u0000\u013b=\u0001\u0000" + + "\u0000\u0000\u013c\u013d\u0005/\u0000\u0000\u013d\u013e\u0004\u001e\u0000" + + "\u0000\u013e?\u0001\u0000\u0000\u0000\u013f\u0140\u0005%\u0000\u0000\u0140" + + "A\u0001\u0000\u0000\u0000\u0141\u0142\u0005+\u0000\u0000\u0142C\u0001" + + "\u0000\u0000\u0000\u0143\u0144\u0005-\u0000\u0000\u0144E\u0001\u0000\u0000" + + "\u0000\u0145\u0146\u0005<\u0000\u0000\u0146\u0147\u0005<\u0000\u0000\u0147" + + "G\u0001\u0000\u0000\u0000\u0148\u0149\u0005>\u0000\u0000\u0149\u014a\u0005" + + ">\u0000\u0000\u014aI\u0001\u0000\u0000\u0000\u014b\u014c\u0005>\u0000" + + "\u0000\u014c\u014d\u0005>\u0000\u0000\u014d\u014e\u0005>\u0000\u0000\u014e" + + "K\u0001\u0000\u0000\u0000\u014f\u0150\u0005<\u0000\u0000\u0150M\u0001" + + "\u0000\u0000\u0000\u0151\u0152\u0005<\u0000\u0000\u0152\u0153\u0005=\u0000" + + "\u0000\u0153O\u0001\u0000\u0000\u0000\u0154\u0155\u0005>\u0000\u0000\u0155" + + "Q\u0001\u0000\u0000\u0000\u0156\u0157\u0005>\u0000\u0000\u0157\u0158\u0005" + + "=\u0000\u0000\u0158S\u0001\u0000\u0000\u0000\u0159\u015a\u0005=\u0000" + + "\u0000\u015a\u015b\u0005=\u0000\u0000\u015bU\u0001\u0000\u0000\u0000\u015c" + + "\u015d\u0005=\u0000\u0000\u015d\u015e\u0005=\u0000\u0000\u015e\u015f\u0005" + + "=\u0000\u0000\u015fW\u0001\u0000\u0000\u0000\u0160\u0161\u0005!\u0000" + + "\u0000\u0161\u0162\u0005=\u0000\u0000\u0162Y\u0001\u0000\u0000\u0000\u0163" + + "\u0164\u0005!\u0000\u0000\u0164\u0165\u0005=\u0000\u0000\u0165\u0166\u0005" + + "=\u0000\u0000\u0166[\u0001\u0000\u0000\u0000\u0167\u0168\u0005&\u0000" + + "\u0000\u0168]\u0001\u0000\u0000\u0000\u0169\u016a\u0005^\u0000\u0000\u016a" + + "_\u0001\u0000\u0000\u0000\u016b\u016c\u0005|\u0000\u0000\u016ca\u0001" + + "\u0000\u0000\u0000\u016d\u016e\u0005&\u0000\u0000\u016e\u016f\u0005&\u0000" + + 
"\u0000\u016fc\u0001\u0000\u0000\u0000\u0170\u0171\u0005|\u0000\u0000\u0171" + + "\u0172\u0005|\u0000\u0000\u0172e\u0001\u0000\u0000\u0000\u0173\u0174\u0005" + + "?\u0000\u0000\u0174g\u0001\u0000\u0000\u0000\u0175\u0176\u0005:\u0000" + + "\u0000\u0176i\u0001\u0000\u0000\u0000\u0177\u0178\u0005?\u0000\u0000\u0178" + + "\u0179\u0005:\u0000\u0000\u0179k\u0001\u0000\u0000\u0000\u017a\u017b\u0005" + + ":\u0000\u0000\u017b\u017c\u0005:\u0000\u0000\u017cm\u0001\u0000\u0000" + + "\u0000\u017d\u017e\u0005-\u0000\u0000\u017e\u017f\u0005>\u0000\u0000\u017f" + + "o\u0001\u0000\u0000\u0000\u0180\u0181\u0005=\u0000\u0000\u0181\u0182\u0005" + + "~\u0000\u0000\u0182q\u0001\u0000\u0000\u0000\u0183\u0184\u0005=\u0000" + + "\u0000\u0184\u0185\u0005=\u0000\u0000\u0185\u0186\u0005~\u0000\u0000\u0186" + + "s\u0001\u0000\u0000\u0000\u0187\u0188\u0005+\u0000\u0000\u0188\u0189\u0005" + + "+\u0000\u0000\u0189u\u0001\u0000\u0000\u0000\u018a\u018b\u0005-\u0000" + + "\u0000\u018b\u018c\u0005-\u0000\u0000\u018cw\u0001\u0000\u0000\u0000\u018d" + + "\u018e\u0005=\u0000\u0000\u018ey\u0001\u0000\u0000\u0000\u018f\u0190\u0005" + + "+\u0000\u0000\u0190\u0191\u0005=\u0000\u0000\u0191{\u0001\u0000\u0000" + + "\u0000\u0192\u0193\u0005-\u0000\u0000\u0193\u0194\u0005=\u0000\u0000\u0194" + + "}\u0001\u0000\u0000\u0000\u0195\u0196\u0005*\u0000\u0000\u0196\u0197\u0005" + + "=\u0000\u0000\u0197\u007f\u0001\u0000\u0000\u0000\u0198\u0199\u0005/\u0000" + + "\u0000\u0199\u019a\u0005=\u0000\u0000\u019a\u0081\u0001\u0000\u0000\u0000" + + "\u019b\u019c\u0005%\u0000\u0000\u019c\u019d\u0005=\u0000\u0000\u019d\u0083" + + "\u0001\u0000\u0000\u0000\u019e\u019f\u0005&\u0000\u0000\u019f\u01a0\u0005" + + "=\u0000\u0000\u01a0\u0085\u0001\u0000\u0000\u0000\u01a1\u01a2\u0005^\u0000" + + "\u0000\u01a2\u01a3\u0005=\u0000\u0000\u01a3\u0087\u0001\u0000\u0000\u0000" + + "\u01a4\u01a5\u0005|\u0000\u0000\u01a5\u01a6\u0005=\u0000\u0000\u01a6\u0089" + + "\u0001\u0000\u0000\u0000\u01a7\u01a8\u0005<\u0000\u0000\u01a8\u01a9\u0005" + + "<\u0000\u0000\u01a9\u01aa\u0005=\u0000\u0000\u01aa\u008b\u0001\u0000\u0000" + + "\u0000\u01ab\u01ac\u0005>\u0000\u0000\u01ac\u01ad\u0005>\u0000\u0000\u01ad" + + "\u01ae\u0005=\u0000\u0000\u01ae\u008d\u0001\u0000\u0000\u0000\u01af\u01b0" + + "\u0005>\u0000\u0000\u01b0\u01b1\u0005>\u0000\u0000\u01b1\u01b2\u0005>" + + "\u0000\u0000\u01b2\u01b3\u0005=\u0000\u0000\u01b3\u008f\u0001\u0000\u0000" + + "\u0000\u01b4\u01b6\u00050\u0000\u0000\u01b5\u01b7\u0007\u0002\u0000\u0000" + + "\u01b6\u01b5\u0001\u0000\u0000\u0000\u01b7\u01b8\u0001\u0000\u0000\u0000" + + "\u01b8\u01b6\u0001\u0000\u0000\u0000\u01b8\u01b9\u0001\u0000\u0000\u0000" + + "\u01b9\u01bb\u0001\u0000\u0000\u0000\u01ba\u01bc\u0007\u0003\u0000\u0000" + + "\u01bb\u01ba\u0001\u0000\u0000\u0000\u01bb\u01bc\u0001\u0000\u0000\u0000" + + "\u01bc\u0091\u0001\u0000\u0000\u0000\u01bd\u01be\u00050\u0000\u0000\u01be" + + "\u01c0\u0007\u0004\u0000\u0000\u01bf\u01c1\u0007\u0005\u0000\u0000\u01c0" + + "\u01bf\u0001\u0000\u0000\u0000\u01c1\u01c2\u0001\u0000\u0000\u0000\u01c2" + + "\u01c0\u0001\u0000\u0000\u0000\u01c2\u01c3\u0001\u0000\u0000\u0000\u01c3" + + "\u01c5\u0001\u0000\u0000\u0000\u01c4\u01c6\u0007\u0003\u0000\u0000\u01c5" + + "\u01c4\u0001\u0000\u0000\u0000\u01c5\u01c6\u0001\u0000\u0000\u0000\u01c6" + + "\u0093\u0001\u0000\u0000\u0000\u01c7\u01d0\u00050\u0000\u0000\u01c8\u01cc" + + "\u0007\u0006\u0000\u0000\u01c9\u01cb\u0007\u0007\u0000\u0000\u01ca\u01c9" + + "\u0001\u0000\u0000\u0000\u01cb\u01ce\u0001\u0000\u0000\u0000\u01cc\u01ca" + + 
"\u0001\u0000\u0000\u0000\u01cc\u01cd\u0001\u0000\u0000\u0000\u01cd\u01d0" + + "\u0001\u0000\u0000\u0000\u01ce\u01cc\u0001\u0000\u0000\u0000\u01cf\u01c7" + + "\u0001\u0000\u0000\u0000\u01cf\u01c8\u0001\u0000\u0000\u0000\u01d0\u01d2" + + "\u0001\u0000\u0000\u0000\u01d1\u01d3\u0007\b\u0000\u0000\u01d2\u01d1\u0001" + + "\u0000\u0000\u0000\u01d2\u01d3\u0001\u0000\u0000\u0000\u01d3\u0095\u0001" + + "\u0000\u0000\u0000\u01d4\u01dd\u00050\u0000\u0000\u01d5\u01d9\u0007\u0006" + + "\u0000\u0000\u01d6\u01d8\u0007\u0007\u0000\u0000\u01d7\u01d6\u0001\u0000" + + "\u0000\u0000\u01d8\u01db\u0001\u0000\u0000\u0000\u01d9\u01d7\u0001\u0000" + + "\u0000\u0000\u01d9\u01da\u0001\u0000\u0000\u0000\u01da\u01dd\u0001\u0000" + + "\u0000\u0000\u01db\u01d9\u0001\u0000\u0000\u0000\u01dc\u01d4\u0001\u0000" + + "\u0000\u0000\u01dc\u01d5\u0001\u0000\u0000\u0000\u01dd\u01e4\u0001\u0000" + + "\u0000\u0000\u01de\u01e0\u0003\u0012\b\u0000\u01df\u01e1\u0007\u0007\u0000" + + "\u0000\u01e0\u01df\u0001\u0000\u0000\u0000\u01e1\u01e2\u0001\u0000\u0000" + + "\u0000\u01e2\u01e0\u0001\u0000\u0000\u0000\u01e2\u01e3\u0001\u0000\u0000" + + "\u0000\u01e3\u01e5\u0001\u0000\u0000\u0000\u01e4\u01de\u0001\u0000\u0000" + + "\u0000\u01e4\u01e5\u0001\u0000\u0000\u0000\u01e5\u01ef\u0001\u0000\u0000" + + "\u0000\u01e6\u01e8\u0007\t\u0000\u0000\u01e7\u01e9\u0007\n\u0000\u0000" + + "\u01e8\u01e7\u0001\u0000\u0000\u0000\u01e8\u01e9\u0001\u0000\u0000\u0000" + + "\u01e9\u01eb\u0001\u0000\u0000\u0000\u01ea\u01ec\u0007\u0007\u0000\u0000" + + "\u01eb\u01ea\u0001\u0000\u0000\u0000\u01ec\u01ed\u0001\u0000\u0000\u0000" + + "\u01ed\u01eb\u0001\u0000\u0000\u0000\u01ed\u01ee\u0001\u0000\u0000\u0000" + + "\u01ee\u01f0\u0001\u0000\u0000\u0000\u01ef\u01e6\u0001\u0000\u0000\u0000" + + "\u01ef\u01f0\u0001\u0000\u0000\u0000\u01f0\u01f2\u0001\u0000\u0000\u0000" + + "\u01f1\u01f3\u0007\u000b\u0000\u0000\u01f2\u01f1\u0001\u0000\u0000\u0000" + + "\u01f2\u01f3\u0001\u0000\u0000\u0000\u01f3\u0097\u0001\u0000\u0000\u0000" + + "\u01f4\u01fc\u0005\"\u0000\u0000\u01f5\u01f6\u0005\\\u0000\u0000\u01f6" + + "\u01fb\u0005\"\u0000\u0000\u01f7\u01f8\u0005\\\u0000\u0000\u01f8\u01fb" + + "\u0005\\\u0000\u0000\u01f9\u01fb\b\f\u0000\u0000\u01fa\u01f5\u0001\u0000" + + "\u0000\u0000\u01fa\u01f7\u0001\u0000\u0000\u0000\u01fa\u01f9\u0001\u0000" + + "\u0000\u0000\u01fb\u01fe\u0001\u0000\u0000\u0000\u01fc\u01fd\u0001\u0000" + + "\u0000\u0000\u01fc\u01fa\u0001\u0000\u0000\u0000\u01fd\u01ff\u0001\u0000" + + "\u0000\u0000\u01fe\u01fc\u0001\u0000\u0000\u0000\u01ff\u020d\u0005\"\u0000" + + "\u0000\u0200\u0208\u0005\'\u0000\u0000\u0201\u0202\u0005\\\u0000\u0000" + + "\u0202\u0207\u0005\'\u0000\u0000\u0203\u0204\u0005\\\u0000\u0000\u0204" + + "\u0207\u0005\\\u0000\u0000\u0205\u0207\b\r\u0000\u0000\u0206\u0201\u0001" + + "\u0000\u0000\u0000\u0206\u0203\u0001\u0000\u0000\u0000\u0206\u0205\u0001" + + "\u0000\u0000\u0000\u0207\u020a\u0001\u0000\u0000\u0000\u0208\u0209\u0001" + + "\u0000\u0000\u0000\u0208\u0206\u0001\u0000\u0000\u0000\u0209\u020b\u0001" + + "\u0000\u0000\u0000\u020a\u0208\u0001\u0000\u0000\u0000\u020b\u020d\u0005" + + "\'\u0000\u0000\u020c\u01f4\u0001\u0000\u0000\u0000\u020c\u0200\u0001\u0000" + + "\u0000\u0000\u020d\u0099\u0001\u0000\u0000\u0000\u020e\u0212\u0005/\u0000" + + "\u0000\u020f\u0210\u0005\\\u0000\u0000\u0210\u0213\b\u000e\u0000\u0000" + + "\u0211\u0213\b\u000f\u0000\u0000\u0212\u020f\u0001\u0000\u0000\u0000\u0212" + + "\u0211\u0001\u0000\u0000\u0000\u0213\u0214\u0001\u0000\u0000\u0000\u0214" + + "\u0215\u0001\u0000\u0000\u0000\u0214\u0212\u0001\u0000\u0000\u0000\u0215" + + 
"\u0216\u0001\u0000\u0000\u0000\u0216\u021a\u0005/\u0000\u0000\u0217\u0219" + + "\u0007\u0010\u0000\u0000\u0218\u0217\u0001\u0000\u0000\u0000\u0219\u021c" + + "\u0001\u0000\u0000\u0000\u021a\u0218\u0001\u0000\u0000\u0000\u021a\u021b" + + "\u0001\u0000\u0000\u0000\u021b\u021d\u0001\u0000\u0000\u0000\u021c\u021a" + + "\u0001\u0000\u0000\u0000\u021d\u021e\u0004L\u0001\u0000\u021e\u009b\u0001" + + "\u0000\u0000\u0000\u021f\u0220\u0005t\u0000\u0000\u0220\u0221\u0005r\u0000" + + "\u0000\u0221\u0222\u0005u\u0000\u0000\u0222\u0223\u0005e\u0000\u0000\u0223" + + "\u009d\u0001\u0000\u0000\u0000\u0224\u0225\u0005f\u0000\u0000\u0225\u0226" + + "\u0005a\u0000\u0000\u0226\u0227\u0005l\u0000\u0000\u0227\u0228\u0005s" + + "\u0000\u0000\u0228\u0229\u0005e\u0000\u0000\u0229\u009f\u0001\u0000\u0000" + + "\u0000\u022a\u022b\u0005n\u0000\u0000\u022b\u022c\u0005u\u0000\u0000\u022c" + + "\u022d\u0005l\u0000\u0000\u022d\u022e\u0005l\u0000\u0000\u022e\u00a1\u0001" + + "\u0000\u0000\u0000\u022f\u0230\u0005b\u0000\u0000\u0230\u0231\u0005o\u0000" + + "\u0000\u0231\u0232\u0005o\u0000\u0000\u0232\u0233\u0005l\u0000\u0000\u0233" + + "\u0234\u0005e\u0000\u0000\u0234\u0235\u0005a\u0000\u0000\u0235\u0256\u0005" + + "n\u0000\u0000\u0236\u0237\u0005b\u0000\u0000\u0237\u0238\u0005y\u0000" + + "\u0000\u0238\u0239\u0005t\u0000\u0000\u0239\u0256\u0005e\u0000\u0000\u023a" + + "\u023b\u0005s\u0000\u0000\u023b\u023c\u0005h\u0000\u0000\u023c\u023d\u0005" + + "o\u0000\u0000\u023d\u023e\u0005r\u0000\u0000\u023e\u0256\u0005t\u0000" + + "\u0000\u023f\u0240\u0005c\u0000\u0000\u0240\u0241\u0005h\u0000\u0000\u0241" + + "\u0242\u0005a\u0000\u0000\u0242\u0256\u0005r\u0000\u0000\u0243\u0244\u0005" + + "i\u0000\u0000\u0244\u0245\u0005n\u0000\u0000\u0245\u0256\u0005t\u0000" + + "\u0000\u0246\u0247\u0005l\u0000\u0000\u0247\u0248\u0005o\u0000\u0000\u0248" + + "\u0249\u0005n\u0000\u0000\u0249\u0256\u0005g\u0000\u0000\u024a\u024b\u0005" + + "f\u0000\u0000\u024b\u024c\u0005l\u0000\u0000\u024c\u024d\u0005o\u0000" + + "\u0000\u024d\u024e\u0005a\u0000\u0000\u024e\u0256\u0005t\u0000\u0000\u024f" + + "\u0250\u0005d\u0000\u0000\u0250\u0251\u0005o\u0000\u0000\u0251\u0252\u0005" + + "u\u0000\u0000\u0252\u0253\u0005b\u0000\u0000\u0253\u0254\u0005l\u0000" + + "\u0000\u0254\u0256\u0005e\u0000\u0000\u0255\u022f\u0001\u0000\u0000\u0000" + + "\u0255\u0236\u0001\u0000\u0000\u0000\u0255\u023a\u0001\u0000\u0000\u0000" + + "\u0255\u023f\u0001\u0000\u0000\u0000\u0255\u0243\u0001\u0000\u0000\u0000" + + "\u0255\u0246\u0001\u0000\u0000\u0000\u0255\u024a\u0001\u0000\u0000\u0000" + + "\u0255\u024f\u0001\u0000\u0000\u0000\u0256\u00a3\u0001\u0000\u0000\u0000" + + "\u0257\u0258\u0005d\u0000\u0000\u0258\u0259\u0005e\u0000\u0000\u0259\u025a" + + "\u0005f\u0000\u0000\u025a\u00a5\u0001\u0000\u0000\u0000\u025b\u025f\u0007" + + "\u0011\u0000\u0000\u025c\u025e\u0007\u0012\u0000\u0000\u025d\u025c\u0001" + + "\u0000\u0000\u0000\u025e\u0261\u0001\u0000\u0000\u0000\u025f\u025d\u0001" + + "\u0000\u0000\u0000\u025f\u0260\u0001\u0000\u0000\u0000\u0260\u00a7\u0001" + + "\u0000\u0000\u0000\u0261\u025f\u0001\u0000\u0000\u0000\u0262\u026b\u0005" + + "0\u0000\u0000\u0263\u0267\u0007\u0006\u0000\u0000\u0264\u0266\u0007\u0007" + + "\u0000\u0000\u0265\u0264\u0001\u0000\u0000\u0000\u0266\u0269\u0001\u0000" + + "\u0000\u0000\u0267\u0265\u0001\u0000\u0000\u0000\u0267\u0268\u0001\u0000" + + "\u0000\u0000\u0268\u026b\u0001\u0000\u0000\u0000\u0269\u0267\u0001\u0000" + + "\u0000\u0000\u026a\u0262\u0001\u0000\u0000\u0000\u026a\u0263\u0001\u0000" + + 
"\u0000\u0000\u026b\u026c\u0001\u0000\u0000\u0000\u026c\u026d\u0006S\u0002" + + "\u0000\u026d\u00a9\u0001\u0000\u0000\u0000\u026e\u0272\u0007\u0011\u0000" + + "\u0000\u026f\u0271\u0007\u0012\u0000\u0000\u0270\u026f\u0001\u0000\u0000" + + "\u0000\u0271\u0274\u0001\u0000\u0000\u0000\u0272\u0270\u0001\u0000\u0000" + + "\u0000\u0272\u0273\u0001\u0000\u0000\u0000\u0273\u0275\u0001\u0000\u0000" + + "\u0000\u0274\u0272\u0001\u0000\u0000\u0000\u0275\u0276\u0006T\u0002\u0000" + + "\u0276\u00ab\u0001\u0000\u0000\u0000\"\u0000\u0001\u00af\u00b9\u00c2\u00c7" + + "\u01b8\u01bb\u01c2\u01c5\u01cc\u01cf\u01d2\u01d9\u01dc\u01e2\u01e4\u01e8" + + "\u01ed\u01ef\u01f2\u01fa\u01fc\u0206\u0208\u020c\u0212\u0214\u021a\u0255" + + "\u025f\u0267\u026a\u0272\u0003\u0006\u0000\u0000\u0002\u0001\u0000\u0002" + + "\u0000\u0000"; public static final ATN _ATN = new ATNDeserializer().deserialize(_serializedATN.toCharArray()); static { _decisionToDFA = new DFA[_ATN.getNumberOfDecisions()]; diff --git a/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java b/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java new file mode 100644 index 0000000000000..dbba3226ba300 --- /dev/null +++ b/modules/lang-painless/src/test/java/org/opensearch/painless/CommentTests.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.painless; + +public class CommentTests extends ScriptTestCase { + + public void testSingleLineComments() { + assertEquals(5, exec("// comment\n return 5")); + assertEquals(5, exec("// comment\r return 5")); + assertEquals(5, exec("return 5 // comment no newline or return char")); + } + + public void testOpenCloseComments() { + assertEquals(5, exec("/* single-line comment */ return 5")); + assertEquals(5, exec("/* multi-line \n */ return 5")); + assertEquals(5, exec("/* multi-line \r */ return 5")); + assertEquals(5, exec("/* multi-line \n\n\r\r */ return 5")); + assertEquals(5, exec("def five = 5; /* multi-line \r */ return five")); + assertEquals(5, exec("return 5 /* multi-line ignored code */")); + } +} From feeb595be5c998efca4d11d65ea6a3d00cea17dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Vl=C4=8Dek?= Date: Wed, 10 Jan 2024 17:44:26 +0100 Subject: [PATCH 57/81] Fix typo in API annotation check message (#11836) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fixing some internal comments as well Signed-off-by: Lukáš Vlček --- CHANGELOG.md | 1 + .../processor/ApiAnnotationProcessor.java | 6 ++--- .../ApiAnnotationProcessorTests.java | 22 +++++++++---------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e00e42e5b756..fed2bb54783a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) - Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) +- Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) ### Security diff --git a/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java index 1864aec4aa951..569f48a8465f3 100644 --- a/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java +++ b/libs/common/src/main/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessor.java @@ -113,7 +113,7 @@ private void process(ExecutableElement executable, Element enclosing) { // The executable element should not be internal (unless constructor for injectable core component) checkNotInternal(enclosing, executable); - // Check this elements annotations + // Check this element's annotations for (final AnnotationMirror annotation : executable.getAnnotationMirrors()) { final Element element = annotation.getAnnotationType().asElement(); if (inspectable(element)) { @@ -210,7 +210,7 @@ private void process(ExecutableElement executable, ReferenceType ref) { } } - // Check this elements annotations + // Check this element's annotations for (final AnnotationMirror annotation : ref.getAnnotationMirrors()) { final Element element = annotation.getAnnotationType().asElement(); if (inspectable(element)) { @@ -316,7 +316,7 @@ private void checkPublic(@Nullable Element referencedBy, final Element element) reportFailureAs, "The element " + element - + " is part of the public APIs but is not maked as 
@PublicApi, @ExperimentalApi or @DeprecatedApi" + + " is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi" + ((referencedBy != null) ? " (referenced by " + referencedBy + ") " : "") ); } diff --git a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java index df04709458b29..8d8a4c7895339 100644 --- a/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java +++ b/libs/common/src/test/java/org/opensearch/common/annotation/processor/ApiAnnotationProcessorTests.java @@ -35,7 +35,7 @@ public void testPublicApiMethodArgumentNotAnnotated() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotated)" ) ) @@ -56,7 +56,7 @@ public void testPublicApiMethodArgumentNotAnnotatedGenerics() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedGenerics)" ) ) @@ -77,7 +77,7 @@ public void testPublicApiMethodThrowsNotAnnotated() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotatedException is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotatedException is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodThrowsNotAnnotated)" ) ) @@ -111,7 +111,7 @@ public void testPublicApiMethodArgumentNotAnnotatedPackagePrivate() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotatedPackagePrivate is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodArgumentNotAnnotatedPackagePrivate)" ) ) @@ -209,7 +209,7 @@ public void testPublicApiMethodReturnNotAnnotated() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by 
org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotated)" ) ) @@ -230,7 +230,7 @@ public void testPublicApiMethodReturnNotAnnotatedGenerics() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedGenerics)" ) ) @@ -251,7 +251,7 @@ public void testPublicApiMethodReturnNotAnnotatedArray() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedArray)" ) ) @@ -272,7 +272,7 @@ public void testPublicApiMethodReturnNotAnnotatedBoundedGenerics() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedBoundedGenerics)" ) ) @@ -297,7 +297,7 @@ public void testPublicApiMethodReturnNotAnnotatedAnnotation() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodReturnNotAnnotatedAnnotation)" ) ) @@ -388,7 +388,7 @@ public void testPublicApiMethodGenericsArgumentNotAnnotated() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotated is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by org.opensearch.common.annotation.processor.PublicApiMethodGenericsArgumentNotAnnotated)" ) ) @@ -453,7 +453,7 @@ public void testPublicApiMethodReturnAnnotatedGenerics() { matching( Diagnostic.Kind.ERROR, containsString( - "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not maked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "The element org.opensearch.common.annotation.processor.NotAnnotatedAnnotation is part of the public APIs but is not marked as @PublicApi, @ExperimentalApi or @DeprecatedApi " + "(referenced by 
org.opensearch.common.annotation.processor.PublicApiMethodReturnAnnotatedGenerics)" ) ) From 2f8bf031bf79baf42bc9fd2ba0269d0ece3cfbb9 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Wed, 10 Jan 2024 12:05:14 -0800 Subject: [PATCH 58/81] Bump 2.12.0 to Lucene 9.9.1 in Version.java (#11837) Signed-off-by: Marc Handalian --- libs/core/src/main/java/org/opensearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index b2a8e619c9720..6a92993f5dd42 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -98,7 +98,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_11_0 = new Version(2110099, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_1 = new Version(2110199, org.apache.lucene.util.Version.LUCENE_9_7_0); public static final Version V_2_11_2 = new Version(2110299, org.apache.lucene.util.Version.LUCENE_9_7_0); - public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_8_0); + public static final Version V_2_12_0 = new Version(2120099, org.apache.lucene.util.Version.LUCENE_9_9_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_9_1); public static final Version CURRENT = V_3_0_0; From 10be2eff03d8b7c93e2f5f4febb587edf12815e0 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Wed, 10 Jan 2024 14:15:18 -0800 Subject: [PATCH 59/81] Enable test CardinalityWithRequestBreakerIT.testRequestBreaker after lucene 99 upgrade (#11841) Signed-off-by: Neetika Singhal --- .../aggregations/metrics/CardinalityWithRequestBreakerIT.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java index 6cbf278317b1b..94756f3fe9f99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/metrics/CardinalityWithRequestBreakerIT.java @@ -76,7 +76,6 @@ protected Settings featureFlagSettings() { /** * Test that searches using cardinality aggregations returns all request breaker memory. */ - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/10154") public void testRequestBreaker() throws Exception { final String requestBreaker = randomIntBetween(1, 10000) + "kb"; logger.info("--> Using request breaker setting: {}", requestBreaker); From de78c7ca6f4d3ca833263a320bb53dc7dd88ddc1 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 10 Jan 2024 18:47:29 -0600 Subject: [PATCH 60/81] Remove unused segment replication feature flag (#11850) This feature has been released and the feature flag is no longer used. 
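[Editor's note, not part of the patch] For context on the flag removed in the diff below, here is a minimal, hypothetical sketch of how a FeatureFlags boolean setting is typically consulted. It assumes only the FeatureFlags.isEnabled(Setting) helper and Setting.boolSetting(...) visible in the following hunks; the flag name and class below are invented for illustration and do not exist in OpenSearch.

    // Illustrative only; EXAMPLE_FLAG is a made-up setting, not one defined in OpenSearch.
    import org.opensearch.common.settings.Setting;
    import org.opensearch.common.settings.Setting.Property;
    import org.opensearch.common.util.FeatureFlags;

    public final class FeatureGateExample {
        // Node-scoped boolean flag, following the naming convention used in FeatureFlags.
        public static final Setting<Boolean> EXAMPLE_FLAG = Setting.boolSetting(
            "opensearch.experimental.feature.example.enabled",
            false,
            Property.NodeScope
        );

        public static void run(Runnable experimentalPath, Runnable stablePath) {
            // FeatureFlags.isEnabled(...) reads the flag; the boolSetting default (false) applies when unset.
            if (FeatureFlags.isEnabled(EXAMPLE_FLAG)) {
                experimentalPath.run();
            } else {
                stablePath.run();
            }
        }
    }

Once a gated feature ships unconditionally, as in the commit below, the gate and its setting can simply be deleted.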
Signed-off-by: Andrew Ross --- .../common/settings/FeatureFlagSettings.java | 22 ++++++------------- .../opensearch/common/util/FeatureFlags.java | 12 ---------- 2 files changed, 7 insertions(+), 27 deletions(-) diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index d3285c379bcc4..e19f8e8370d5b 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -11,9 +11,6 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.util.FeatureFlags; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; import java.util.Set; /** @@ -32,17 +29,12 @@ protected FeatureFlagSettings( super(settings, settingsSet, settingUpgraders, scope); } - public static final Set> BUILT_IN_FEATURE_FLAGS = Collections.unmodifiableSet( - new HashSet<>( - Arrays.asList( - FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING, - FeatureFlags.EXTENSIONS_SETTING, - FeatureFlags.IDENTITY_SETTING, - FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, - FeatureFlags.TELEMETRY_SETTING, - FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, - FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING - ) - ) + public static final Set> BUILT_IN_FEATURE_FLAGS = Set.of( + FeatureFlags.EXTENSIONS_SETTING, + FeatureFlags.IDENTITY_SETTING, + FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, + FeatureFlags.TELEMETRY_SETTING, + FeatureFlags.DATETIME_FORMATTER_CACHING_SETTING, + FeatureFlags.WRITEABLE_REMOTE_INDEX_SETTING ); } diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index c54772caa574b..d4ab161527cc0 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -20,12 +20,6 @@ * @opensearch.internal */ public class FeatureFlags { - /** - * Gates the visibility of the segment replication experimental features that allows users to test unreleased beta features. - */ - public static final String SEGMENT_REPLICATION_EXPERIMENTAL = - "opensearch.experimental.feature.segment_replication_experimental.enabled"; - /** * Gates the ability for Searchable Snapshots to read snapshots that are older than the * guaranteed backward compatibility for OpenSearch (one prior major version) on a best effort basis. 
@@ -105,12 +99,6 @@ public static boolean isEnabled(Setting featureFlag) { } } - public static final Setting SEGMENT_REPLICATION_EXPERIMENTAL_SETTING = Setting.boolSetting( - SEGMENT_REPLICATION_EXPERIMENTAL, - false, - Property.NodeScope - ); - public static final Setting EXTENSIONS_SETTING = Setting.boolSetting(EXTENSIONS, false, Property.NodeScope); public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); From e4583a438132034bcc20762fa76ef02a4b8713d9 Mon Sep 17 00:00:00 2001 From: Sagar <99425694+sgup432@users.noreply.github.com> Date: Wed, 10 Jan 2024 17:06:29 -0800 Subject: [PATCH 61/81] [Tiered Caching] Enable serialization of IndicesRequestCache.Key (#10275) --------- Signed-off-by: Sagar Upadhyaya Signed-off-by: Sagar <99425694+sgup432@users.noreply.github.com> Co-authored-by: Kiran Prakash --- CHANGELOG.md | 4 +- .../opensearch/core/index/shard/ShardId.java | 7 + .../indices/IndicesRequestCacheIT.java | 40 ++++ .../index/OpenSearchDirectoryReader.java | 61 ++++- .../indices/IndicesRequestCache.java | 101 +++++--- .../opensearch/indices/IndicesService.java | 16 +- .../indices/IndicesRequestCacheTests.java | 224 +++++++++++------- .../indices/IndicesServiceCloseTests.java | 19 +- 8 files changed, 327 insertions(+), 145 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fed2bb54783a7..0b256473dec6f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,8 +101,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) -- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753] - (https://github.com/opensearch-project/OpenSearch/pull/10753)) +- [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) +- [Tiered caching] Defining interfaces, listeners and extending IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) - Update the indexRandom function to create more segments for concurrent search tests ([10247](https://github.com/opensearch-project/OpenSearch/pull/10247)) - Add support for query profiler with concurrent aggregation ([#9248](https://github.com/opensearch-project/OpenSearch/pull/9248)) diff --git a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java index c0abad7ed727f..1e48cf1f476da 100644 --- a/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java +++ b/libs/core/src/main/java/org/opensearch/core/index/shard/ShardId.java @@ -32,6 +32,7 @@ package org.opensearch.core.index.shard; +import org.apache.lucene.util.RamUsageEstimator; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -55,6 +56,8 @@ public class ShardId implements Comparable, ToXContentFragment, Writeab private final int shardId; private final int hashCode; + 
private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ShardId.class); + /** * Constructs a new shard id. * @param index the index name @@ -88,6 +91,10 @@ public ShardId(StreamInput in) throws IOException { hashCode = computeHashCode(); } + public long getBaseRamBytesUsed() { + return BASE_RAM_BYTES_USED; + } + /** * Writes this shard id to a stream. * @param out the stream to write to diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java index 848f6eddbb0df..51dba07a8f9f8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java @@ -636,6 +636,45 @@ public void testProfileDisableCache() throws Exception { } } + public void testCacheWithInvalidation() throws Exception { + Client client = client(); + assertAcked( + client.admin() + .indices() + .prepareCreate("index") + .setMapping("k", "type=keyword") + .setSettings( + Settings.builder() + .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .get() + ); + indexRandom(true, client.prepareIndex("index").setSource("k", "hello")); + ensureSearchable("index"); + SearchResponse resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + OpenSearchAssertions.assertAllSuccessful(resp); + assertThat(resp.getHits().getTotalHits().value, equalTo(1L)); + + assertCacheState(client, "index", 0, 1); + // Index but don't refresh + indexRandom(false, client.prepareIndex("index").setSource("k", "hello2")); + resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + // Should expect hit as here as refresh didn't happen + assertCacheState(client, "index", 1, 1); + + // Explicit refresh would invalidate cache + refresh(); + // Hit same query again + resp = client.prepareSearch("index").setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get(); + assertSearchResponse(resp); + // Should expect miss as key has changed due to change in IndexReader.CacheKey (due to refresh) + assertCacheState(client, "index", 1, 2); + } + private static void assertCacheState(Client client, String index, long expectedHits, long expectedMisses) { RequestCacheStats requestCacheStats = client.admin() .indices() @@ -650,6 +689,7 @@ private static void assertCacheState(Client client, String index, long expectedH Arrays.asList(expectedHits, expectedMisses, 0L), Arrays.asList(requestCacheStats.getHitCount(), requestCacheStats.getMissCount(), requestCacheStats.getEvictions()) ); + } } diff --git a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java index d6b2bb239b2a1..f9a87b9e74214 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/OpenSearchDirectoryReader.java @@ -40,6 +40,8 @@ import org.opensearch.core.index.shard.ShardId; import java.io.IOException; +import java.util.Optional; +import java.util.UUID; /** * A {@link 
org.apache.lucene.index.FilterDirectoryReader} that exposes @@ -53,11 +55,14 @@ public final class OpenSearchDirectoryReader extends FilterDirectoryReader { private final ShardId shardId; private final FilterDirectoryReader.SubReaderWrapper wrapper; + private final DelegatingCacheHelper delegatingCacheHelper; + private OpenSearchDirectoryReader(DirectoryReader in, FilterDirectoryReader.SubReaderWrapper wrapper, ShardId shardId) throws IOException { super(in, wrapper); this.wrapper = wrapper; this.shardId = shardId; + this.delegatingCacheHelper = new DelegatingCacheHelper(in.getReaderCacheHelper()); } /** @@ -70,7 +75,61 @@ public ShardId shardId() { @Override public CacheHelper getReaderCacheHelper() { // safe to delegate since this reader does not alter the index - return in.getReaderCacheHelper(); + return this.delegatingCacheHelper; + } + + public DelegatingCacheHelper getDelegatingCacheHelper() { + return this.delegatingCacheHelper; + } + + /** + * Wraps existing IndexReader cache helper which internally provides a way to wrap CacheKey. + * @opensearch.internal + */ + public class DelegatingCacheHelper implements CacheHelper { + private final CacheHelper cacheHelper; + private final DelegatingCacheKey serializableCacheKey; + + DelegatingCacheHelper(CacheHelper cacheHelper) { + this.cacheHelper = cacheHelper; + this.serializableCacheKey = new DelegatingCacheKey(Optional.ofNullable(cacheHelper).map(key -> getKey()).orElse(null)); + } + + @Override + public CacheKey getKey() { + return this.cacheHelper.getKey(); + } + + public DelegatingCacheKey getDelegatingCacheKey() { + return this.serializableCacheKey; + } + + @Override + public void addClosedListener(ClosedListener listener) { + this.cacheHelper.addClosedListener(listener); + } + } + + /** + * Wraps internal IndexReader.CacheKey and attaches a uniqueId to it which can be eventually be used instead of + * object itself for serialization purposes. 
+ */ + public class DelegatingCacheKey { + private final CacheKey cacheKey; + private final String uniqueId; + + DelegatingCacheKey(CacheKey cacheKey) { + this.cacheKey = cacheKey; + this.uniqueId = UUID.randomUUID().toString(); + } + + public CacheKey getCacheKey() { + return this.cacheKey; + } + + public String getId() { + return uniqueId; + } } @Override diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 4a19f8eb8714d..6d5c23274dbd6 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -51,7 +51,12 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.shard.IndexShard; import java.io.Closeable; import java.io.IOException; @@ -60,8 +65,10 @@ import java.util.HashSet; import java.util.Iterator; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; /** * The indices request cache allows to cache a shard level request stage responses, helping with improving @@ -103,13 +110,16 @@ public final class IndicesRequestCache implements RemovalListener registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); private final Set keysToClean = ConcurrentCollections.newConcurrentSet(); private final ByteSizeValue size; private final TimeValue expire; private final Cache cache; + private final Function> cacheEntityLookup; - IndicesRequestCache(Settings settings) { + IndicesRequestCache(Settings settings, Function> cacheEntityFunction) { this.size = INDICES_CACHE_QUERY_SIZE.get(settings); this.expire = INDICES_CACHE_QUERY_EXPIRE.exists(settings) ? INDICES_CACHE_QUERY_EXPIRE.get(settings) : null; long sizeInBytes = size.getBytes(); @@ -121,6 +131,7 @@ public final class IndicesRequestCache implements RemovalListener notification) { - notification.getKey().entity.onRemoval(notification); + // In case this event happens for an old shard, we can safely ignore this as we don't keep track for old + // shards as part of request cache. 
+ cacheEntityLookup.apply(notification.getKey().shardId).ifPresent(entity -> entity.onRemoval(notification)); } BytesReference getOrCompute( - CacheEntity cacheEntity, + IndicesService.IndexShardCacheEntity cacheEntity, CheckedSupplier loader, DirectoryReader reader, BytesReference cacheKey ) throws Exception { assert reader.getReaderCacheHelper() != null; - final Key key = new Key(cacheEntity, reader.getReaderCacheHelper().getKey(), cacheKey); + assert reader.getReaderCacheHelper() instanceof OpenSearchDirectoryReader.DelegatingCacheHelper; + + OpenSearchDirectoryReader.DelegatingCacheHelper delegatingCacheHelper = (OpenSearchDirectoryReader.DelegatingCacheHelper) reader + .getReaderCacheHelper(); + String readerCacheKeyId = delegatingCacheHelper.getDelegatingCacheKey().getId(); + assert readerCacheKeyId != null; + final Key key = new Key(((IndexShard) cacheEntity.getCacheIdentity()).shardId(), cacheKey, readerCacheKeyId); Loader cacheLoader = new Loader(cacheEntity, loader); BytesReference value = cache.computeIfAbsent(key, cacheLoader); if (cacheLoader.isLoaded()) { - key.entity.onMiss(); + cacheEntity.onMiss(); // see if its the first time we see this reader, and make sure to register a cleanup key - CleanupKey cleanupKey = new CleanupKey(cacheEntity, reader.getReaderCacheHelper().getKey()); + CleanupKey cleanupKey = new CleanupKey(cacheEntity, readerCacheKeyId); if (!registeredClosedListeners.containsKey(cleanupKey)) { Boolean previous = registeredClosedListeners.putIfAbsent(cleanupKey, Boolean.TRUE); if (previous == null) { @@ -159,7 +178,7 @@ BytesReference getOrCompute( } } } else { - key.entity.onHit(); + cacheEntity.onHit(); } return value; } @@ -170,9 +189,14 @@ BytesReference getOrCompute( * @param reader the reader to invalidate the cache entry for * @param cacheKey the cache key to invalidate */ - void invalidate(CacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { + void invalidate(IndicesService.IndexShardCacheEntity cacheEntity, DirectoryReader reader, BytesReference cacheKey) { assert reader.getReaderCacheHelper() != null; - cache.invalidate(new Key(cacheEntity, reader.getReaderCacheHelper().getKey(), cacheKey)); + String readerCacheKeyId = null; + if (reader instanceof OpenSearchDirectoryReader) { + IndexReader.CacheHelper cacheHelper = ((OpenSearchDirectoryReader) reader).getDelegatingCacheHelper(); + readerCacheKeyId = ((OpenSearchDirectoryReader.DelegatingCacheHelper) cacheHelper).getDelegatingCacheKey().getId(); + } + cache.invalidate(new Key(((IndexShard) cacheEntity.getCacheIdentity()).shardId(), cacheKey, readerCacheKeyId)); } /** @@ -240,6 +264,7 @@ interface CacheEntity extends Accountable { * Called when this entity instance is removed */ void onRemoval(RemovalNotification notification); + } /** @@ -247,22 +272,26 @@ interface CacheEntity extends Accountable { * * @opensearch.internal */ - public static class Key implements Accountable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class); - - public final CacheEntity entity; // use as identity equality - public final IndexReader.CacheKey readerCacheKey; + static class Key implements Accountable, Writeable { + public final ShardId shardId; // use as identity equality + public final String readerCacheKeyId; public final BytesReference value; - Key(CacheEntity entity, IndexReader.CacheKey readerCacheKey, BytesReference value) { - this.entity = entity; - this.readerCacheKey = Objects.requireNonNull(readerCacheKey); + Key(ShardId shardId, 
BytesReference value, String readerCacheKeyId) { + this.shardId = shardId; this.value = value; + this.readerCacheKeyId = Objects.requireNonNull(readerCacheKeyId); + } + + Key(StreamInput in) throws IOException { + this.shardId = in.readOptionalWriteable(ShardId::new); + this.readerCacheKeyId = in.readOptionalString(); + this.value = in.readBytesReference(); } @Override public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + entity.ramBytesUsed() + value.length(); + return BASE_RAM_BYTES_USED + shardId.getBaseRamBytesUsed() + value.length(); } @Override @@ -276,28 +305,35 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Key key = (Key) o; - if (Objects.equals(readerCacheKey, key.readerCacheKey) == false) return false; - if (!entity.getCacheIdentity().equals(key.entity.getCacheIdentity())) return false; + if (!Objects.equals(readerCacheKeyId, key.readerCacheKeyId)) return false; + if (!shardId.equals(key.shardId)) return false; if (!value.equals(key.value)) return false; return true; } @Override public int hashCode() { - int result = entity.getCacheIdentity().hashCode(); - result = 31 * result + readerCacheKey.hashCode(); + int result = shardId.hashCode(); + result = 31 * result + readerCacheKeyId.hashCode(); result = 31 * result + value.hashCode(); return result; } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(shardId); + out.writeOptionalString(readerCacheKeyId); + out.writeBytesReference(value); + } } private class CleanupKey implements IndexReader.ClosedListener { final CacheEntity entity; - final IndexReader.CacheKey readerCacheKey; + final String readerCacheKeyId; - private CleanupKey(CacheEntity entity, IndexReader.CacheKey readerCacheKey) { + private CleanupKey(CacheEntity entity, String readerCacheKeyId) { this.entity = entity; - this.readerCacheKey = readerCacheKey; + this.readerCacheKeyId = readerCacheKeyId; } @Override @@ -315,7 +351,7 @@ public boolean equals(Object o) { return false; } CleanupKey that = (CleanupKey) o; - if (Objects.equals(readerCacheKey, that.readerCacheKey) == false) return false; + if (!Objects.equals(readerCacheKeyId, that.readerCacheKeyId)) return false; if (!entity.getCacheIdentity().equals(that.entity.getCacheIdentity())) return false; return true; } @@ -323,7 +359,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int result = entity.getCacheIdentity().hashCode(); - result = 31 * result + Objects.hashCode(readerCacheKey); + result = 31 * result + Objects.hashCode(readerCacheKeyId); return result; } } @@ -339,9 +375,9 @@ synchronized void cleanCache() { for (Iterator iterator = keysToClean.iterator(); iterator.hasNext();) { CleanupKey cleanupKey = iterator.next(); iterator.remove(); - if (cleanupKey.readerCacheKey == null || cleanupKey.entity.isOpen() == false) { + if (cleanupKey.readerCacheKeyId == null || !cleanupKey.entity.isOpen()) { // null indicates full cleanup, as does a closed shard - currentFullClean.add(cleanupKey.entity.getCacheIdentity()); + currentFullClean.add(((IndexShard) cleanupKey.entity.getCacheIdentity()).shardId()); } else { currentKeysToClean.add(cleanupKey); } @@ -349,10 +385,13 @@ synchronized void cleanCache() { if (!currentKeysToClean.isEmpty() || !currentFullClean.isEmpty()) { for (Iterator iterator = cache.keys().iterator(); iterator.hasNext();) { Key key = iterator.next(); - if (currentFullClean.contains(key.entity.getCacheIdentity())) { + if 
(currentFullClean.contains(key.shardId)) { iterator.remove(); } else { - if (currentKeysToClean.contains(new CleanupKey(key.entity, key.readerCacheKey))) { + // If the flow comes here, then we should have a open shard available on node. + if (currentKeysToClean.contains( + new CleanupKey(cacheEntityLookup.apply(key.shardId).orElse(null), key.readerCacheKeyId) + )) { iterator.remove(); } } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 5c3beaf8509bd..db5b93f073b03 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -409,7 +409,13 @@ public IndicesService( this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); this.analysisRegistry = analysisRegistry; this.indexNameExpressionResolver = indexNameExpressionResolver; - this.indicesRequestCache = new IndicesRequestCache(settings); + this.indicesRequestCache = new IndicesRequestCache(settings, (shardId -> { + IndexService indexService = this.indices.get(shardId.getIndex().getUUID()); + if (indexService == null) { + return Optional.empty(); + } + return Optional.of(new IndexShardCacheEntity(indexService.getShard(shardId.id()))); + })); this.indicesQueryCache = new IndicesQueryCache(settings); this.mapperRegistry = mapperRegistry; this.namedWriteableRegistry = namedWriteableRegistry; @@ -1744,7 +1750,6 @@ private BytesReference cacheShardLevelResult( BytesReference cacheKey, CheckedConsumer loader ) throws Exception { - IndexShardCacheEntity cacheEntity = new IndexShardCacheEntity(shard); CheckedSupplier supplier = () -> { /* BytesStreamOutput allows to pass the expected size but by default uses * BigArrays.PAGE_SIZE_IN_BYTES which is 16k. A common cached result ie. 
@@ -1761,7 +1766,7 @@ private BytesReference cacheShardLevelResult( return out.bytes(); } }; - return indicesRequestCache.getOrCompute(cacheEntity, supplier, reader, cacheKey); + return indicesRequestCache.getOrCompute(new IndexShardCacheEntity(shard), supplier, reader, cacheKey); } /** @@ -1769,11 +1774,12 @@ private BytesReference cacheShardLevelResult( * * @opensearch.internal */ - static final class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + public static class IndexShardCacheEntity extends AbstractIndexShardCacheEntity { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IndexShardCacheEntity.class); private final IndexShard indexShard; - protected IndexShardCacheEntity(IndexShard indexShard) { + public IndexShardCacheEntity(IndexShard indexShard) { this.indexShard = indexShard; } diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index 8494259c8fd8a..73728aec12e51 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -52,23 +52,36 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.AbstractBytesReference; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentHelper; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.IndexService; +import org.opensearch.index.cache.request.RequestCacheStats; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardState; +import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.Optional; +import java.util.UUID; -public class IndicesRequestCacheTests extends OpenSearchTestCase { +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class IndicesRequestCacheTests extends OpenSearchSingleNodeTestCase { public void testBasicOperationsCache() throws Exception { - ShardRequestCache requestCacheStats = new ShardRequestCache(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.EMPTY, + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) + ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -76,13 +89,13 @@ public void testBasicOperationsCache() throws Exception { DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - AtomicBoolean indexShard = new AtomicBoolean(true); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + 
IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = indexShard.requestCache(); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -90,10 +103,11 @@ public void testBasicOperationsCache() throws Exception { assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = indexShard.requestCache(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -106,7 +120,7 @@ public void testBasicOperationsCache() throws Exception { if (randomBoolean()) { reader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(entity); } cache.cleanCache(); @@ -122,9 +136,17 @@ public void testBasicOperationsCache() throws Exception { } public void testCacheDifferentReaders() throws Exception { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + })); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -141,9 +163,10 @@ public void testCacheDifferentReaders() throws Exception { DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); + ShardRequestCache requestCacheStats = entity.stats(); assertEquals("foo", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); @@ -155,9 +178,10 @@ public void testCacheDifferentReaders() throws Exception { assertEquals(1, cache.numRegisteredCloseListeners()); // cache the second - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); value = cache.getOrCompute(entity, loader, 
secondReader, termBytes); + requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); @@ -167,9 +191,10 @@ public void testCacheDifferentReaders() throws Exception { assertTrue(requestCacheStats.stats().getMemorySize().bytesAsInt() > cacheSize + value.length()); assertEquals(2, cache.numRegisteredCloseListeners()); - secondEntity = new TestEntity(requestCacheStats, indexShard); + secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(secondReader, 0); value = cache.getOrCompute(secondEntity, loader, secondReader, termBytes); + requestCacheStats = entity.stats(); assertEquals("bar", value.streamInput().readString()); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); @@ -177,10 +202,11 @@ public void testCacheDifferentReaders() throws Exception { assertTrue(loader.loadedFromCache); assertEquals(2, cache.count()); - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(2, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -201,7 +227,7 @@ public void testCacheDifferentReaders() throws Exception { if (randomBoolean()) { secondReader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open cache.clear(secondEntity); } cache.cleanCache(); @@ -218,9 +244,11 @@ public void testCacheDifferentReaders() throws Exception { public void testEviction() throws Exception { final ByteSizeValue size; { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache( + Settings.EMPTY, + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) + ); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -228,26 +256,26 @@ public void testEviction() throws Exception { DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); BytesReference value1 = 
cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - size = requestCacheStats.stats().getMemorySize(); + size = indexShard.requestCache().stats().getMemorySize(); IOUtils.close(reader, secondReader, writer, dir, cache); } + IndexShard indexShard = createIndex("test1").getShard(0); IndicesRequestCache cache = new IndicesRequestCache( - Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 1 + "b").build() + Settings.builder().put(IndicesRequestCache.INDICES_CACHE_QUERY_SIZE.getKey(), size.getBytes() + 1 + "b").build(), + (shardId -> Optional.of(new IndicesService.IndexShardCacheEntity(indexShard))) ); - AtomicBoolean indexShard = new AtomicBoolean(true); - ShardRequestCache requestCacheStats = new ShardRequestCache(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -255,36 +283,44 @@ public void testEviction() throws Exception { DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity thirddEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader thirdLoader = new Loader(thirdReader, 0); BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); + logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); assertEquals("baz", value3.streamInput().readString()); assertEquals(2, cache.count()); - assertEquals(1, requestCacheStats.stats().getEvictions()); + assertEquals(1, indexShard.requestCache().stats().getEvictions()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); } public void testClearAllEntityIdentity() throws Exception { - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); - AtomicBoolean indexShard = new AtomicBoolean(true); + IndicesService indicesService = 
getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + })); - ShardRequestCache requestCacheStats = new ShardRequestCache(); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -292,36 +328,39 @@ public void testClearAllEntityIdentity() throws Exception { DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "bar")); DirectoryReader secondReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - TestEntity secondEntity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity secondEntity = new IndicesService.IndexShardCacheEntity(indexShard); Loader secondLoader = new Loader(secondReader, 0); writer.updateDocument(new Term("id", "0"), newDoc(0, "baz")); DirectoryReader thirdReader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); - AtomicBoolean differentIdentity = new AtomicBoolean(true); - TestEntity thirddEntity = new TestEntity(requestCacheStats, differentIdentity); + IndicesService.IndexShardCacheEntity thirddEntity = new IndicesService.IndexShardCacheEntity(createIndex("test1").getShard(0)); Loader thirdLoader = new Loader(thirdReader, 0); BytesReference value1 = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value1.streamInput().readString()); BytesReference value2 = cache.getOrCompute(secondEntity, secondLoader, secondReader, termBytes); assertEquals("bar", value2.streamInput().readString()); - logger.info("Memory size: {}", requestCacheStats.stats().getMemorySize()); + logger.info("Memory size: {}", indexShard.requestCache().stats().getMemorySize()); BytesReference value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); assertEquals("baz", value3.streamInput().readString()); assertEquals(3, cache.count()); - final long hitCount = requestCacheStats.stats().getHitCount(); + RequestCacheStats requestCacheStats = entity.stats().stats(); + requestCacheStats.add(thirddEntity.stats().stats()); + final long hitCount = requestCacheStats.getHitCount(); // clear all for the indexShard Idendity even though is't still open cache.clear(randomFrom(entity, secondEntity)); cache.cleanCache(); assertEquals(1, cache.count()); // third has not been validated since it's a different identity value3 = cache.getOrCompute(thirddEntity, thirdLoader, thirdReader, termBytes); - assertEquals(hitCount + 1, requestCacheStats.stats().getHitCount()); + requestCacheStats = entity.stats().stats(); + requestCacheStats.add(thirddEntity.stats().stats()); + assertEquals(hitCount + 1, 
requestCacheStats.getHitCount()); assertEquals("baz", value3.streamInput().readString()); IOUtils.close(reader, secondReader, thirdReader, writer, dir, cache); @@ -365,8 +404,17 @@ public BytesReference get() { } public void testInvalidate() throws Exception { - ShardRequestCache requestCacheStats = new ShardRequestCache(); - IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexShard indexShard = createIndex("test").getShard(0); + IndicesRequestCache cache = new IndicesRequestCache(Settings.EMPTY, (shardId -> { + IndexService indexService = null; + try { + indexService = indicesService.indexServiceSafe(shardId.getIndex()); + } catch (IndexNotFoundException ex) { + return Optional.empty(); + } + return Optional.of(new IndicesService.IndexShardCacheEntity(indexService.getShard(shardId.id()))); + })); Directory dir = newDirectory(); IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig()); @@ -374,13 +422,13 @@ public void testInvalidate() throws Exception { DirectoryReader reader = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), new ShardId("foo", "bar", 1)); TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); - AtomicBoolean indexShard = new AtomicBoolean(true); // initial cache - TestEntity entity = new TestEntity(requestCacheStats, indexShard); + IndicesService.IndexShardCacheEntity entity = new IndicesService.IndexShardCacheEntity(indexShard); Loader loader = new Loader(reader, 0); BytesReference value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + ShardRequestCache requestCacheStats = entity.stats(); assertEquals(0, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -388,10 +436,11 @@ public void testInvalidate() throws Exception { assertEquals(1, cache.count()); // cache hit - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(1, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -401,11 +450,12 @@ public void testInvalidate() throws Exception { assertEquals(1, cache.numRegisteredCloseListeners()); // load again after invalidate - entity = new TestEntity(requestCacheStats, indexShard); + entity = new IndicesService.IndexShardCacheEntity(indexShard); loader = new Loader(reader, 0); cache.invalidate(entity, reader, termBytes); value = cache.getOrCompute(entity, loader, reader, termBytes); assertEquals("foo", value.streamInput().readString()); + requestCacheStats = entity.stats(); assertEquals(1, requestCacheStats.stats().getHitCount()); assertEquals(2, requestCacheStats.stats().getMissCount()); assertEquals(0, requestCacheStats.stats().getEvictions()); @@ -418,7 +468,7 @@ public void testInvalidate() throws Exception { if (randomBoolean()) { reader.close(); } else { - indexShard.set(false); // closed shard but reader is still open + indexShard.close("test", true, true); // closed shard but reader is still open 
cache.clear(entity); } cache.cleanCache(); @@ -433,22 +483,25 @@ public void testInvalidate() throws Exception { } public void testEqualsKey() throws IOException { - AtomicBoolean trueBoolean = new AtomicBoolean(true); - AtomicBoolean falseBoolean = new AtomicBoolean(false); + IndicesService indicesService = getInstanceFromNode(IndicesService.class); Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig(); IndexWriter writer = new IndexWriter(dir, config); - IndexReader reader1 = DirectoryReader.open(writer); - IndexReader.CacheKey rKey1 = reader1.getReaderCacheHelper().getKey(); + ShardId shardId = new ShardId("foo", "bar", 1); + ShardId shardId1 = new ShardId("foo1", "bar1", 2); + IndexReader reader1 = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + String rKey1 = ((OpenSearchDirectoryReader) reader1).getDelegatingCacheHelper().getDelegatingCacheKey().getId(); writer.addDocument(new Document()); - IndexReader reader2 = DirectoryReader.open(writer); - IndexReader.CacheKey rKey2 = reader2.getReaderCacheHelper().getKey(); + IndexReader reader2 = OpenSearchDirectoryReader.wrap(DirectoryReader.open(writer), shardId); + String rKey2 = ((OpenSearchDirectoryReader) reader2).getDelegatingCacheHelper().getDelegatingCacheKey().getId(); IOUtils.close(reader1, reader2, writer, dir); - IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key3 = new IndicesRequestCache.Key(new TestEntity(null, falseBoolean), rKey1, new TestBytesReference(1)); - IndicesRequestCache.Key key4 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey2, new TestBytesReference(1)); - IndicesRequestCache.Key key5 = new IndicesRequestCache.Key(new TestEntity(null, trueBoolean), rKey1, new TestBytesReference(2)); + IndexShard indexShard = mock(IndexShard.class); + when(indexShard.state()).thenReturn(IndexShardState.STARTED); + IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key3 = new IndicesRequestCache.Key(shardId1, new TestBytesReference(1), rKey1); + IndicesRequestCache.Key key4 = new IndicesRequestCache.Key(shardId, new TestBytesReference(1), rKey2); + IndicesRequestCache.Key key5 = new IndicesRequestCache.Key(shardId, new TestBytesReference(2), rKey2); String s = "Some other random object"; assertEquals(key1, key1); assertEquals(key1, key2); @@ -459,6 +512,29 @@ public void testEqualsKey() throws IOException { assertNotEquals(key1, key5); } + public void testSerializationDeserializationOfCacheKey() throws Exception { + TermQueryBuilder termQuery = new TermQueryBuilder("id", "0"); + BytesReference termBytes = XContentHelper.toXContent(termQuery, MediaTypeRegistry.JSON, false); + IndexService indexService = createIndex("test"); + IndexShard indexShard = indexService.getShard(0); + IndicesService.IndexShardCacheEntity shardCacheEntity = new IndicesService.IndexShardCacheEntity(indexShard); + String readerCacheKeyId = UUID.randomUUID().toString(); + IndicesRequestCache.Key key1 = new IndicesRequestCache.Key(indexShard.shardId(), termBytes, readerCacheKeyId); + BytesReference bytesReference = null; + try (BytesStreamOutput out = new BytesStreamOutput()) 
{ + key1.writeTo(out); + bytesReference = out.bytes(); + } + StreamInput in = bytesReference.streamInput(); + + IndicesRequestCache.Key key2 = new IndicesRequestCache.Key(in); + + assertEquals(readerCacheKeyId, key2.readerCacheKeyId); + assertEquals(((IndexShard) shardCacheEntity.getCacheIdentity()).shardId(), key2.shardId); + assertEquals(termBytes, key2.value); + + } + private class TestBytesReference extends AbstractBytesReference { int dummyValue; @@ -509,34 +585,4 @@ public boolean isFragment() { return false; } } - - private class TestEntity extends AbstractIndexShardCacheEntity { - private final AtomicBoolean standInForIndexShard; - private final ShardRequestCache shardRequestCache; - - private TestEntity(ShardRequestCache shardRequestCache, AtomicBoolean standInForIndexShard) { - this.standInForIndexShard = standInForIndexShard; - this.shardRequestCache = shardRequestCache; - } - - @Override - protected ShardRequestCache stats() { - return shardRequestCache; - } - - @Override - public boolean isOpen() { - return standInForIndexShard.get(); - } - - @Override - public Object getCacheIdentity() { - return standInForIndexShard; - } - - @Override - public long ramBytesUsed() { - return 42; - } - } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java index add1dfc348a8b..8a00cd2db21c9 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceCloseTests.java @@ -44,18 +44,15 @@ import org.apache.lucene.search.Weight; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; -import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.common.bytes.BytesArray; -import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; -import org.opensearch.indices.IndicesRequestCache.Key; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.node.MockNode; import org.opensearch.node.Node; @@ -373,15 +370,12 @@ public void testCloseWhileOngoingRequestUsesRequestCache() throws Exception { assertEquals(1, indicesService.indicesRefCount.refCount()); assertEquals(0L, cache.count()); - IndicesRequestCache.CacheEntity cacheEntity = new IndicesRequestCache.CacheEntity() { + IndicesService.IndexShardCacheEntity cacheEntity = new IndicesService.IndexShardCacheEntity(shard) { @Override public long ramBytesUsed() { return 42; } - @Override - public void onCached(Key key, BytesReference value) {} - @Override public boolean isOpen() { return true; @@ -389,17 +383,8 @@ public boolean isOpen() { @Override public Object getCacheIdentity() { - return this; + return shard; } - - @Override - public void onHit() {} - - @Override - public void onMiss() {} - - @Override - public void onRemoval(RemovalNotification notification) {} }; cache.getOrCompute(cacheEntity, () -> new BytesArray("bar"), searcher.getDirectoryReader(), new BytesArray("foo")); assertEquals(1L, cache.count()); From bbe790bd9cbfe44035f63f22d196a694be5ff3ab Mon Sep 17 00:00:00 2001 
From: Neetika Singhal Date: Wed, 10 Jan 2024 20:53:07 -0800 Subject: [PATCH 62/81] Fix SimpleNestedIT.testExplain flaky test (#11681) Signed-off-by: Neetika Singhal --- .../search/nested/SimpleNestedExplainIT.java | 121 ++++++++++++++++++ .../search/nested/SimpleNestedIT.java | 9 +- 2 files changed, 128 insertions(+), 2 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java new file mode 100644 index 0000000000000..71f82d7c0b412 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedExplainIT.java @@ -0,0 +1,121 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.nested; + +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.join.ScoreMode; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.nestedQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +/** + * Creating a separate class with no parameterization to create and index documents in a single + * test run and compare search responses across concurrent and non-concurrent search. For more details, + * refer: https://github.com/opensearch-project/OpenSearch/issues/11413 + */ +public class SimpleNestedExplainIT extends OpenSearchIntegTestCase { + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + /* + * Tests the explain output for multiple docs. Concurrent search with multiple slices is tested + * here as call to indexRandomForMultipleSlices is made and compared with explain output for + * non-concurrent search use-case. 
Separate test class is created to test explain for 1 slice + * case in concurrent search, refer {@link SimpleExplainIT#testExplainWithSingleDoc} + * For more details, refer: https://github.com/opensearch-project/OpenSearch/issues/11413 + * */ + public void testExplainMultipleDocs() throws Exception { + assertAcked( + prepareCreate("test").setMapping( + jsonBuilder().startObject() + .startObject("properties") + .startObject("nested1") + .field("type", "nested") + .endObject() + .endObject() + .endObject() + ) + ); + + ensureGreen(); + + client().prepareIndex("test") + .setId("1") + .setSource( + jsonBuilder().startObject() + .field("field1", "value1") + .startArray("nested1") + .startObject() + .field("n_field1", "n_value1") + .endObject() + .startObject() + .field("n_field1", "n_value1") + .endObject() + .endArray() + .endObject() + ) + .setRefreshPolicy(IMMEDIATE) + .get(); + + indexRandomForMultipleSlices("test"); + + // Turn off the concurrent search setting to test search with non-concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build()) + .get(); + + SearchResponse nonConSearchResp = client().prepareSearch("test") + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true) + .get(); + assertNoFailures(nonConSearchResp); + assertThat(nonConSearchResp.getHits().getTotalHits().value, equalTo(1L)); + Explanation nonConSearchExplain = nonConSearchResp.getHits().getHits()[0].getExplanation(); + assertThat(nonConSearchExplain.getValue(), equalTo(nonConSearchResp.getHits().getHits()[0].getScore())); + + // Turn on the concurrent search setting to test search with concurrent search + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build()) + .get(); + + SearchResponse conSearchResp = client().prepareSearch("test") + .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) + .setExplain(true) + .get(); + assertNoFailures(conSearchResp); + assertThat(conSearchResp.getHits().getTotalHits().value, equalTo(1L)); + Explanation conSearchExplain = conSearchResp.getHits().getHits()[0].getExplanation(); + assertThat(conSearchExplain.getValue(), equalTo(conSearchResp.getHits().getHits()[0].getScore())); + + // assert that the explanation for concurrent search should be equal to the non-concurrent search's explanation + assertEquals(nonConSearchExplain, conSearchExplain); + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()).build()) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java index 6d0b074c3a660..8eeffcbecb377 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/nested/SimpleNestedIT.java @@ -455,7 +455,13 @@ public void testDeleteNestedDocsWithAlias() throws Exception { assertDocumentCount("test", 6); } - public void testExplain() throws Exception { + /* + * Tests the explain output for single doc. 
Concurrent search with only slice 1 is tested + * here as call to indexRandomForMultipleSlices has implications on the range of child docs + * in the explain output. Separate test class is created to test explain for multiple slices + * case in concurrent search, refer {@link SimpleNestedExplainIT} + * */ + public void testExplainWithSingleDoc() throws Exception { assertAcked( prepareCreate("test").setMapping( jsonBuilder().startObject() @@ -487,7 +493,6 @@ public void testExplain() throws Exception { ) .setRefreshPolicy(IMMEDIATE) .get(); - indexRandomForConcurrentSearch("test"); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(nestedQuery("nested1", termQuery("nested1.n_field1", "n_value1"), ScoreMode.Total)) From b0426881ed6e0271ef9110b00e3d44ad83116935 Mon Sep 17 00:00:00 2001 From: Jay Deng Date: Thu, 11 Jan 2024 05:26:15 -0800 Subject: [PATCH 63/81] Correctly calculate doc_count_error at the slice level for concurrent segment search. Change slice_size heuristic to be equal to shard_size. (#11732) Signed-off-by: Jay Deng --- CHANGELOG.md | 1 + .../aggregations/bucket/ShardSizeTermsIT.java | 18 +- .../bucket/TermsDocCountErrorIT.java | 10 +- .../bucket/TermsFixedDocCountErrorIT.java | 347 ++++++++++++++++++ .../bucket/TermsShardMinDocCountIT.java | 16 +- .../bucket/terms/InternalTerms.java | 21 +- .../search/internal/SearchContext.java | 11 +- 7 files changed, 403 insertions(+), 21 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b256473dec6f..b638b63dd52a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -192,6 +192,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Restore support for Java 8 for RestClient ([#11562](https://github.com/opensearch-project/OpenSearch/pull/11562)) - Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) - Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) +- Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java index 145830f02ee56..7c7cc12888307 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/ShardSizeTermsIT.java @@ -86,6 +86,7 @@ public void testShardSizeEqualsSizeString() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -98,8 +99,11 @@ public void testShardSizeEqualsSizeString() throws Exception { expected.put("1", 8L); expected.put("3", 8L); expected.put("2", 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsString()))); + expectedDocCount = expected.get(bucket.getKeyAsString()); + // Doc count can vary when using concurrent segment search. 
See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } @@ -221,6 +225,7 @@ public void testShardSizeEqualsSizeLong() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -233,8 +238,11 @@ public void testShardSizeEqualsSizeLong() throws Exception { expected.put(1, 8L); expected.put(3, 8L); expected.put(2, 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + expectedDocCount = expected.get(bucket.getKeyAsNumber().intValue()); + // Doc count can vary when using concurrent segment search. See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } @@ -355,6 +363,7 @@ public void testShardSizeEqualsSizeDouble() throws Exception { terms("keys").field("key") .size(3) .shardSize(3) + .showTermDocCountError(true) .collectMode(randomFrom(SubAggCollectionMode.values())) .order(BucketOrder.count(false)) ) @@ -367,8 +376,11 @@ public void testShardSizeEqualsSizeDouble() throws Exception { expected.put(1, 8L); expected.put(3, 8L); expected.put(2, 4L); + Long expectedDocCount; for (Terms.Bucket bucket : buckets) { - assertThat(bucket.getDocCount(), equalTo(expected.get(bucket.getKeyAsNumber().intValue()))); + expectedDocCount = expected.get(bucket.getKeyAsNumber().intValue()); + // Doc count can vary when using concurrent segment search. See https://github.com/opensearch-project/OpenSearch/issues/11680 + assertTrue((bucket.getDocCount() == expectedDocCount) || bucket.getDocCount() + bucket.getDocCountError() >= expectedDocCount); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java index b355ce6d7a8dd..343cea4b94c87 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsDocCountErrorIT.java @@ -225,8 +225,16 @@ public void setupSuiteScopeCluster() throws Exception { } indexRandom(true, builders); - indexRandomForMultipleSlices("idx"); ensureSearchable(); + + // Force merge each shard down to 1 segment to verify results are the same between concurrent and non-concurrent search paths, else + // for concurrent segment search there will be additional error introduced during the slice level reduce and thus different buckets, + // doc_counts, and doc_count_errors may be returned. This test serves to verify that the doc_count_error is the same between + // concurrent and non-concurrent search in the 1 slice case. TermsFixedDocCountErrorIT verifies that the doc count error is + // correctly calculated for concurrent segment search at the slice level. 
+ // See https://github.com/opensearch-project/OpenSearch/issues/11680" + forceMerge(1); + Thread.sleep(5000); // Sleep 5s to ensure force merge completes } private void assertDocCountErrorWithinBounds(int size, SearchResponse accurateResponse, SearchResponse testResponse) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java new file mode 100644 index 0000000000000..5ad913e8c7086 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsFixedDocCountErrorIT.java @@ -0,0 +1,347 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.search.aggregations.bucket.terms.Terms; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.ParameterizedOpenSearchIntegTestCase; + +import java.util.Arrays; +import java.util.Collection; + +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.search.aggregations.AggregationBuilders.terms; +import static org.opensearch.test.OpenSearchIntegTestCase.Scope.TEST; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = TEST, numClientNodes = 0, maxNumDataNodes = 1, supportsDedicatedMasters = false) +public class TermsFixedDocCountErrorIT extends ParameterizedOpenSearchIntegTestCase { + + private static final String STRING_FIELD_NAME = "s_value"; + + public TermsFixedDocCountErrorIT(Settings dynamicSettings) { + super(dynamicSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() } + ); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + + public void testSimpleAggErrorMultiShard() throws Exception { + // size = 1, shard_size = 2 + // Shard_1 [A, A, A, A, B, B, C, C, D, D] -> Buckets {"A" : 4, "B" : 2} + // Shard_2 [A, B, B, B, C, C, C, D, D, D] -> Buckets {"B" : 3, "C" : 3} + // coordinator -> Buckets {"B" : 5, "A" : 4} + // Agg error is 4, from (shard_size)th bucket on each shard + // Bucket "A" error is 2, from (shard_size)th bucket on shard_2 + // Bucket "B" error is 0, it's present on both shards + + // size = 1 shard_size = 1 slice_size = 1 + // non-cs / cs + // Shard_1 [A, B, C] + // Shard_2 [B, C, D] + // cs + // Shard_1 slice_1 [A, B, C] -> {a : 1} -> {a : 1 -- error: 1} + // slice_2 [B, C, D] -> {b : 1} + // 
Coordinator should return the same doc count error in both cases + + assertAcked( + prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_1"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_mshard_1").get(); + assertEquals(1, segmentResponse.getIndices().get("idx_mshard_1").getShards().get(0).getShards()[0].getSegments().size()); + + assertAcked( + prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + segmentResponse = client().admin().indices().prepareSegments("idx_mshard_2").get(); + assertEquals(1, segmentResponse.getIndices().get("idx_mshard_2").getShards().get(0).getShards()[0].getSegments().size()); + + SearchResponse response = client().prepareSearch("idx_mshard_2", "idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(4, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); // Bucket "A" + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(2, bucket.getDocCountError()); + } + + public void testSimpleAggErrorSingleShard() throws Exception { + assertAcked( + 
prepareCreate("idx_shard_error").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_shard_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + refresh("idx_shard_error"); + + SearchResponse response = client().prepareSearch("idx_shard_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(1).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(1, terms.getBuckets().size()); + assertEquals(0, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); + assertEquals("A", bucket.getKey().toString()); + assertEquals(6, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + } + + public void testSliceLevelDocCountErrorSingleShard() throws Exception { + assumeTrue( + "Slice level error is not relevant to non-concurrent search cases", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + + // Slices are created by sorting segments by doc count in descending order then distributing in round robin fashion. + // Creates 2 segments (and therefore 2 slices since slice_count = 2) as follows: + // 1. [A, A, A, B, B, C] + // 2. 
[A, B, B, B, C, C] + // Thus we expect the doc count error for A to be 2 as the nth largest bucket on slice 2 has size 2 + + assertAcked( + prepareCreate("idx_slice_error").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_slice_error"); + + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + client().prepareIndex("idx_slice_error").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_slice_error"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_slice_error").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_slice_error").getShards().get(0).getShards()[0].getSegments().size()); + + // Confirm that there is no error when shard_size == slice_size > cardinality + SearchResponse response = client().prepareSearch("idx_slice_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(1).shardSize(4)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(1, terms.getBuckets().size()); + assertEquals(0, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + response = client().prepareSearch("idx_slice_error") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(4, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); // Bucket "B" + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); // Bucket "A" + assertEquals("A", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + 
assertEquals(2, bucket.getDocCountError()); + } + + public void testSliceLevelDocCountErrorMultiShard() throws Exception { + assumeTrue( + "Slice level error is not relevant to non-concurrent search cases", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + + // Size = 2, shard_size = 2 + // Shard_1 [A, A, A, A, B, B, C, C] + // slice_1 [A, A, A, B, B, C] {"A" : 3, "B" : 2} + // slice_2 [A, C] {"A" : 1, "C" : 1} + // Shard_1 buckets: {"A" : 4 - error: 0, "B" : 2 - error: 1} + // Shard_2 [A, A, B, B, B, C, C, C] + // slice_1 [A, B, B, B, C, C] {"B" : 3, "C" : 2} + // slice_2 [A, C] {"A" : 1, "C" : 1} + // Shard_2 buckets: {"B" : 3 - error: 1, "C" : 3 - error: 0} + // Overall + // {"B" : 5 - error: 2, "A" : 4 - error: 3} Agg error: 6 + + assertAcked( + prepareCreate("idx_mshard_1").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_1"); + + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_1").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_1"); + + IndicesSegmentResponse segmentResponse = client().admin().indices().prepareSegments("idx_mshard_1").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_mshard_1").getShards().get(0).getShards()[0].getSegments().size()); + + SearchResponse response = client().prepareSearch("idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + Terms terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(3, terms.getDocCountError()); + + Terms.Bucket bucket = terms.getBuckets().get(0); + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("B", bucket.getKey().toString()); + assertEquals(2, bucket.getDocCount()); + assertEquals(1, bucket.getDocCountError()); + + assertAcked( + prepareCreate("idx_mshard_2").setMapping(STRING_FIELD_NAME, "type=keyword") + .setSettings( + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + ); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, 
"B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "B").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "A").endObject()).get(); + client().prepareIndex("idx_mshard_2").setSource(jsonBuilder().startObject().field(STRING_FIELD_NAME, "C").endObject()).get(); + refresh("idx_mshard_2"); + + segmentResponse = client().admin().indices().prepareSegments("idx_mshard_2").get(); + assertEquals(2, segmentResponse.getIndices().get("idx_mshard_2").getShards().get(0).getShards()[0].getSegments().size()); + + response = client().prepareSearch("idx_mshard_2") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(3, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); + assertEquals("B", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + assertEquals(1, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("C", bucket.getKey().toString()); + assertEquals(3, bucket.getDocCount()); + assertEquals(0, bucket.getDocCountError()); + + response = client().prepareSearch("idx_mshard_2", "idx_mshard_1") + .setSize(0) + .addAggregation(terms("terms").field(STRING_FIELD_NAME).showTermDocCountError(true).size(2).shardSize(2)) + .get(); + + terms = response.getAggregations().get("terms"); + assertEquals(2, terms.getBuckets().size()); + assertEquals(6, terms.getDocCountError()); + + bucket = terms.getBuckets().get(0); + assertEquals("B", bucket.getKey().toString()); + assertEquals(5, bucket.getDocCount()); + assertEquals(2, bucket.getDocCountError()); + + bucket = terms.getBuckets().get(1); + assertEquals("A", bucket.getKey().toString()); + assertEquals(4, bucket.getDocCount()); + assertEquals(3, bucket.getDocCountError()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index b0d8e7ea02e8f..3851b16551795 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -88,6 +88,10 @@ private static String randomExecutionHint() { // see https://github.com/elastic/elasticsearch/issues/5998 public void testShardMinDocCountSignificantTermsTest() throws Exception { + assumeFalse( + "For concurrent segment search shard_min_doc_count is not enforced at the slice level. 
See https://github.com/opensearch-project/OpenSearch/issues/11847", + internalCluster().clusterService().getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); String textMappings; if (randomBoolean()) { textMappings = "type=long"; @@ -157,6 +161,10 @@ private void addTermsDocs(String term, int numInClass, int numNotInClass, List aggregations) { private long getDocCountError(InternalTerms terms, ReduceContext reduceContext) { int size = terms.getBuckets().size(); - // doc_count_error is always computed at the coordinator based on the buckets returned by the shards. This should be 0 during the - // shard level reduce as no buckets are being pruned at this stage. - if (reduceContext.isSliceLevel() || size == 0 || size < terms.getShardSize() || isKeyOrder(terms.order)) { + if (size == 0 || size < terms.getShardSize() || isKeyOrder(terms.order)) { return 0; } else if (InternalOrder.isCountDesc(terms.order)) { if (terms.getDocCountError() > 0) { @@ -398,6 +397,12 @@ public InternalAggregation reduce(List aggregations, Reduce for (InternalAggregation aggregation : aggregations) { @SuppressWarnings("unchecked") InternalTerms terms = (InternalTerms) aggregation; + // For Concurrent Segment Search the aggregation will have a computed doc count error coming from the shards. + // We use the existence of this doc count error to determine whether or not doc count error originated from the slice level + // and if so we will maintain the doc count error for the 1 shard case at the coordinator level + if (aggregations.size() == 1 && terms.getDocCountError() > 0) { + hasSliceLevelDocCountError = true; + } if (referenceTerms == null && aggregation.getClass().equals(UnmappedTerms.class) == false) { referenceTerms = terms; } @@ -500,7 +505,11 @@ For backward compatibility, we disable the merge sort and use ({@link InternalTe if (sumDocCountError == -1) { docCountError = -1; } else { - docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; + if (hasSliceLevelDocCountError) { + docCountError = sumDocCountError; + } else { + docCountError = aggregations.size() == 1 ? 0 : sumDocCountError; + } } // Shards must return buckets sorted by key, so we apply the sort here in shard level reduce @@ -512,7 +521,7 @@ For backward compatibility, we disable the merge sort and use ({@link InternalTe @Override protected B reduceBucket(List buckets, ReduceContext context) { - assert buckets.size() > 0; + assert !buckets.isEmpty(); long docCount = 0; // For the per term doc count error we add up the errors from the // shards that did not respond with the term. 
To do this we add up @@ -523,7 +532,7 @@ protected B reduceBucket(List buckets, ReduceContext context) { for (B bucket : buckets) { docCount += bucket.getDocCount(); if (docCountError != -1) { - if (bucket.showDocCountError() == false || bucket.getDocCountError() == -1) { + if (bucket.showDocCountError() == false) { docCountError = -1; } else { docCountError += bucket.getDocCountError(); diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index cc43f4e5d79fb..02837da64dafd 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -86,8 +86,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicBoolean; -import static org.opensearch.search.aggregations.bucket.BucketUtils.suggestShardSideQueueSize; - /** * This class encapsulates the state needed to execute a search. It holds a reference to the * shards point in time snapshot (IndexReader / ContextIndexSearcher) and allows passing on @@ -410,11 +408,10 @@ public boolean shouldUseConcurrentSearch() { * Returns local bucket count thresholds based on concurrent segment search status */ public LocalBucketCountThresholds asLocalBucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { - if (shouldUseConcurrentSearch()) { - return new LocalBucketCountThresholds(0, suggestShardSideQueueSize(bucketCountThresholds.getShardSize())); - } else { - return new LocalBucketCountThresholds(bucketCountThresholds.getShardMinDocCount(), bucketCountThresholds.getShardSize()); - } + return new LocalBucketCountThresholds( + shouldUseConcurrentSearch() ? 0 : bucketCountThresholds.getShardMinDocCount(), + bucketCountThresholds.getShardSize() + ); } /** From 3d577145cc1c3d2ec916ca3ff5a3472c98132f2b Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 11 Jan 2024 06:41:55 -0800 Subject: [PATCH 64/81] Update runTask to optionally install plugins (#11844) Signed-off-by: Marc Handalian --- DEVELOPER_GUIDE.md | 8 +++++++- gradle/run.gradle | 6 ++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md index f9936aad0cf8c..21adbb0305ab1 100644 --- a/DEVELOPER_GUIDE.md +++ b/DEVELOPER_GUIDE.md @@ -183,6 +183,12 @@ Run OpenSearch using `gradlew run`. ./gradlew run ``` +[Plugins](plugins/) may be installed by passing a `-PinstalledPlugins` property: + +```bash +./gradlew run -PinstalledPlugins="['plugin1', 'plugin2']" +``` + That will build OpenSearch and start it, writing its log above Gradle's status message. We log a lot of stuff on startup, specifically these lines tell you that OpenSearch is ready. ``` @@ -578,7 +584,7 @@ explicitly marked by an annotation should not be extended by external implementa any time. The `@DeprecatedApi` annotation could also be added to any classes annotated with `@PublicApi` (or documented as `@opensearch.api`) or their methods that are either changed (with replacement) or planned to be removed across major versions. -The APIs which are designated to be public but have not been stabilized yet should be marked with `@ExperimentalApi` (or documented as `@opensearch.experimental`) +The APIs which are designated to be public but have not been stabilized yet should be marked with `@ExperimentalApi` (or documented as `@opensearch.experimental`) annotation. 
The presence of this annotation signals that API may change at any time (major, minor or even patch releases). In general, the classes annotated with `@PublicApi` may expose other classes or methods annotated with `@ExperimentalApi`, in such cases the backward compatibility guarantees would not apply to latter (see please [Experimental Development](#experimental-development) for more details). diff --git a/gradle/run.gradle b/gradle/run.gradle index 639479e97d28f..34651f1d94964 100644 --- a/gradle/run.gradle +++ b/gradle/run.gradle @@ -39,6 +39,12 @@ testClusters { testDistribution = 'archive' if (numZones > 1) numberOfZones = numZones if (numNodes > 1) numberOfNodes = numNodes + if (findProperty("installedPlugins")) { + installedPlugins = Eval.me(installedPlugins) + for (String p : installedPlugins) { + plugin('plugins:'.concat(p)) + } + } } } From 3cf1ce68e6dff44033366aee2e8b77597a07d8ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jan 2024 11:22:59 -0500 Subject: [PATCH 65/81] Bump com.diffplug.spotless from 6.20.0 to 6.23.2 (#11797) Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index b1cd1d532bfeb..296c30391af09 100644 --- a/build.gradle +++ b/build.gradle @@ -54,7 +54,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.20.0" apply false + id "com.diffplug.spotless" version "6.23.2" apply false id "org.gradle.test-retry" version "1.5.4" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 6aab36052055e24c2ef56454da9d3a1e4982ee6e Mon Sep 17 00:00:00 2001 From: Chenyang Ji Date: Thu, 11 Jan 2024 12:45:43 -0800 Subject: [PATCH 66/81] Support dynamically adding SearchRequestOperationsListener (#11526) Along the way, also refactored TransportSearchAction.TimeProvider, so that it's no longer a (redundant) listener. 
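As a rough illustration of the intended wiring (a minimal sketch, not code taken from this change): a factory is constructed once with the cluster-level listeners and is then asked, for each search request, to build a composite listener that also carries any request-level listeners. The sketch assumes it sits in the org.opensearch.action.search package (SearchRequestContext and the composite listener are package-private), uses an empty factory so it stays self-contained, and the class name CompositeListenerFactorySketch is invented for illustration.

```java
/*
 * Minimal usage sketch. Assumptions: placed in org.opensearch.action.search
 * (several of these types are package-private) and compiled against the
 * server module; the class name is made up for illustration.
 */
package org.opensearch.action.search;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class CompositeListenerFactorySketch {
    public static void main(String[] args) {
        Logger logger = LogManager.getLogger(CompositeListenerFactorySketch.class);
        SearchRequest searchRequest = new SearchRequest();

        // The factory is built once with the cluster-level listeners; an empty
        // factory keeps this sketch self-contained.
        SearchRequestOperationsCompositeListenerFactory factory =
            new SearchRequestOperationsCompositeListenerFactory();

        // Per request, the factory filters the cluster-level listeners plus any
        // request-level listeners (varargs) down to the ones that are enabled.
        SearchRequestOperationsListener.CompositeListener composite =
            factory.buildCompositeListener(searchRequest, logger);

        // The composite listener travels with the request through the search phases.
        SearchRequestContext requestContext = new SearchRequestContext(composite, searchRequest);
        System.out.println("listeners enabled for this request: " + composite.getListeners().size());
    }
}
```

In the change itself, the factory is presumably created during node construction with the cluster-level listeners and handed to TransportSearchAction, which builds one composite listener per incoming search request (see the Node.java and TransportSearchAction.java hunks below).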
--------- Signed-off-by: Chenyang Ji --- CHANGELOG.md | 1 + .../search/SearchWeightedRoutingIT.java | 2 +- .../search/stats/SearchStatsIT.java | 2 +- .../search/AbstractSearchAsyncAction.java | 4 +- .../action/search/SearchRequestContext.java | 20 +-- ...estOperationsCompositeListenerFactory.java | 81 +++++++++++ .../SearchRequestOperationsListener.java | 27 +++- .../action/search/SearchRequestSlowLog.java | 18 ++- .../action/search/SearchRequestStats.java | 14 +- .../action/search/SearchResponseMerger.java | 6 +- .../action/search/TransportSearchAction.java | 135 ++++-------------- .../common/settings/ClusterSettings.java | 5 +- .../main/java/org/opensearch/node/Node.java | 16 ++- .../cluster/node/stats/NodeStatsTests.java | 9 +- .../AbstractSearchAsyncActionTests.java | 24 +++- .../CanMatchPreFilterSearchPhaseTests.java | 31 +++- .../action/search/SearchAsyncActionTests.java | 12 +- .../SearchQueryThenFetchAsyncActionTests.java | 7 +- ...erationsCompositeListenerFactoryTests.java | 131 +++++++++++++++++ ...earchRequestOperationsListenerSupport.java | 12 +- .../search/SearchRequestSlowLogTests.java | 24 +++- .../search/SearchRequestStatsTests.java | 35 ++++- .../search/SearchResponseMergerTests.java | 97 +++++++++++-- .../search/SearchTimeProviderTests.java | 54 ------- .../search/TransportSearchActionTests.java | 37 ++++- .../index/search/stats/SearchStatsTests.java | 5 +- .../indices/NodeIndicesStatsTests.java | 5 +- .../snapshots/SnapshotResiliencyTests.java | 9 +- 28 files changed, 577 insertions(+), 246 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java create mode 100644 server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java delete mode 100644 server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index b638b63dd52a0..1f64dcf82bd4d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -193,6 +193,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add deleted doc count in _cat/shards ([#11678](https://github.com/opensearch-project/OpenSearch/pull/11678)) - Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) - Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) +- Added Support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java index aa1fe695ecc12..d1e66c19c28e2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchWeightedRoutingIT.java @@ -57,7 +57,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; +import static org.opensearch.action.search.SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED_KEY; import static 
org.opensearch.search.aggregations.AggregationBuilders.terms; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java index 253a8b2b14824..8fb3c57dd7680 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/stats/SearchStatsIT.java @@ -64,7 +64,7 @@ import java.util.Set; import java.util.function.Function; -import static org.opensearch.action.search.TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED_KEY; +import static org.opensearch.action.search.SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED_KEY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; diff --git a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java index f18bbb8a1cc13..5b41c2a13b596 100644 --- a/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/AbstractSearchAsyncAction.java @@ -214,7 +214,7 @@ public final void start() { 0, 0, buildTookInMillis(), - timeProvider.getPhaseTook(), + searchRequestContext.getPhaseTook(), ShardSearchFailure.EMPTY_ARRAY, clusters, null @@ -670,7 +670,7 @@ protected final SearchResponse buildSearchResponse( successfulOps.get(), skippedOps.get(), buildTookInMillis(), - timeProvider.getPhaseTook(), + searchRequestContext.getPhaseTook(), failures, clusters, searchContextId diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java index 674363600b251..eceac7204b196 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -8,13 +8,11 @@ package org.opensearch.action.search; -import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.TotalHits; import org.opensearch.common.annotation.InternalApi; import java.util.EnumMap; import java.util.HashMap; -import java.util.List; import java.util.Locale; import java.util.Map; @@ -31,18 +29,14 @@ class SearchRequestContext { private TotalHits totalHits; private final EnumMap shardStats; - /** - * This constructor is for testing only - */ - SearchRequestContext() { - this(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger())); - } + private final SearchRequest searchRequest; - SearchRequestContext(SearchRequestOperationsListener searchRequestOperationsListener) { + SearchRequestContext(final SearchRequestOperationsListener searchRequestOperationsListener, final SearchRequest searchRequest) { this.searchRequestOperationsListener = searchRequestOperationsListener; this.absoluteStartNanos = System.nanoTime(); this.phaseTookMap = new HashMap<>(); this.shardStats = new EnumMap<>(ShardStatsFieldNames.class); + this.searchRequest = searchRequest; } SearchRequestOperationsListener getSearchRequestOperationsListener() { @@ -57,6 +51,14 @@ Map phaseTookMap() { return 
phaseTookMap; } + SearchResponse.PhaseTook getPhaseTook() { + if (searchRequest != null && searchRequest.isPhaseTook() != null && searchRequest.isPhaseTook()) { + return new SearchResponse.PhaseTook(phaseTookMap); + } else { + return null; + } + } + /** * Override absoluteStartNanos set in constructor. * For testing only diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java new file mode 100644 index 0000000000000..db487bf945889 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactory.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.Logger; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * SearchRequestOperationsCompositeListenerFactory contains listeners registered to search requests, + * and is responsible for creating the {@link SearchRequestOperationsListener.CompositeListener} + * with the all listeners enabled at cluster-level and request-level. + * + * + * @opensearch.internal + */ +public final class SearchRequestOperationsCompositeListenerFactory { + private final List searchRequestListenersList; + + /** + * Create the SearchRequestOperationsCompositeListenerFactory and add multiple {@link SearchRequestOperationsListener} + * to the searchRequestListenersList. + * Those enabled listeners will be executed during each search request. + * + * @param listeners Multiple SearchRequestOperationsListener object to add. + * @throws IllegalArgumentException if any input listener is null. + */ + public SearchRequestOperationsCompositeListenerFactory(final SearchRequestOperationsListener... listeners) { + searchRequestListenersList = new ArrayList<>(); + for (SearchRequestOperationsListener listener : listeners) { + if (listener == null) { + throw new IllegalArgumentException("listener must not be null"); + } + searchRequestListenersList.add(listener); + } + } + + /** + * Get searchRequestListenersList, + * + * @return List of SearchRequestOperationsListener + */ + public List getListeners() { + return searchRequestListenersList; + } + + /** + * Create the {@link SearchRequestOperationsListener.CompositeListener} + * with the all listeners enabled at cluster-level and request-level. + * + * @param searchRequest The SearchRequest object used to decide which request-level listeners to add based on states/flags + * @param logger Logger to be attached to the {@link SearchRequestOperationsListener.CompositeListener} + * @param perRequestListeners the per-request listeners that can be optionally added to the returned CompositeListener list. + * @return SearchRequestOperationsListener.CompositeListener + */ + public SearchRequestOperationsListener.CompositeListener buildCompositeListener( + final SearchRequest searchRequest, + final Logger logger, + final SearchRequestOperationsListener... 
perRequestListeners + ) { + final List searchListenersList = Stream.concat( + searchRequestListenersList.stream(), + Arrays.stream(perRequestListeners) + ) + .filter((searchRequestOperationsListener -> searchRequestOperationsListener.isEnabled(searchRequest))) + .collect(Collectors.toList()); + + return new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java index 19ce0beb3c493..2a09cc084f79f 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestOperationsListener.java @@ -20,7 +20,16 @@ * @opensearch.internal */ @InternalApi -abstract class SearchRequestOperationsListener { +public abstract class SearchRequestOperationsListener { + private volatile boolean enabled; + + protected SearchRequestOperationsListener() { + this.enabled = true; + } + + protected SearchRequestOperationsListener(final boolean enabled) { + this.enabled = enabled; + } abstract void onPhaseStart(SearchPhaseContext context); @@ -32,6 +41,18 @@ void onRequestStart(SearchRequestContext searchRequestContext) {} void onRequestEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + boolean isEnabled(SearchRequest searchRequest) { + return isEnabled(); + } + + boolean isEnabled() { + return enabled; + } + + protected void setEnabled(final boolean enabled) { + this.enabled = enabled; + } + /** * Holder of Composite Listeners * @@ -101,5 +122,9 @@ public void onRequestEnd(SearchPhaseContext context, SearchRequestContext search } } } + + public List getListeners() { + return listeners; + } } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java index a55cfd463a78f..7f25f9026f215 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestSlowLog.java @@ -116,11 +116,11 @@ public SearchRequestSlowLog(ClusterService clusterService) { this.logger = logger; Loggers.setLevel(this.logger, SlowLogLevel.TRACE.name()); - this.warnThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING).nanos(); - this.infoThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING).nanos(); - this.debugThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING).nanos(); - this.traceThreshold = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING).nanos(); - this.level = clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL); + this.setWarnThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING)); + this.setInfoThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_INFO_SETTING)); + this.setDebugThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_DEBUG_SETTING)); + this.setTraceThreshold(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_TRACE_SETTING)); + 
this.setLevel(clusterService.getClusterSettings().get(CLUSTER_SEARCH_REQUEST_SLOWLOG_LEVEL)); clusterService.getClusterSettings() .addSettingsUpdateConsumer(CLUSTER_SEARCH_REQUEST_SLOWLOG_THRESHOLD_WARN_SETTING, this::setWarnThreshold); @@ -233,18 +233,22 @@ private static String escapeJson(String text) { void setWarnThreshold(TimeValue warnThreshold) { this.warnThreshold = warnThreshold.nanos(); + setEnabledIfThresholdExceed(); } void setInfoThreshold(TimeValue infoThreshold) { this.infoThreshold = infoThreshold.nanos(); + setEnabledIfThresholdExceed(); } void setDebugThreshold(TimeValue debugThreshold) { this.debugThreshold = debugThreshold.nanos(); + setEnabledIfThresholdExceed(); } void setTraceThreshold(TimeValue traceThreshold) { this.traceThreshold = traceThreshold.nanos(); + setEnabledIfThresholdExceed(); } void setLevel(SlowLogLevel level) { @@ -270,4 +274,8 @@ protected long getTraceThreshold() { SlowLogLevel getLevel() { return level; } + + private void setEnabledIfThresholdExceed() { + super.setEnabled(this.warnThreshold >= 0 || this.debugThreshold >= 0 || this.infoThreshold >= 0 || this.traceThreshold >= 0); + } } diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java index 262750849eaa9..88d599a0dcdaa 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestStats.java @@ -12,6 +12,8 @@ import org.opensearch.common.inject.Inject; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.metrics.MeanMetric; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import java.util.EnumMap; import java.util.Map; @@ -26,8 +28,18 @@ public final class SearchRequestStats extends SearchRequestOperationsListener { Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); + public static final String SEARCH_REQUEST_STATS_ENABLED_KEY = "search.request_stats_enabled"; + public static final Setting SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( + SEARCH_REQUEST_STATS_ENABLED_KEY, + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + @Inject - public SearchRequestStats() { + public SearchRequestStats(ClusterSettings clusterSettings) { + this.setEnabled(clusterSettings.get(SEARCH_REQUEST_STATS_ENABLED)); + clusterSettings.addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setEnabled); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { phaseStatsMap.put(searchPhaseName, new StatsHolder()); } diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java index 054bd578cc56c..538e7fd54e2c3 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseMerger.java @@ -110,7 +110,7 @@ final class SearchResponseMerger { /** * Add a search response to the list of responses to be merged together into one. * Merges currently happen at once when all responses are available and - * {@link #getMergedResponse(SearchResponse.Clusters)} )} is called. + * {@link #getMergedResponse(SearchResponse.Clusters, SearchRequestContext)} )} is called. * That may change in the future as it's possible to introduce incremental merges as responses come in if necessary. 
*/ void add(SearchResponse searchResponse) { @@ -126,7 +126,7 @@ int numResponses() { * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)} * so that all responses are merged into a single one. */ - SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { + SearchResponse getMergedResponse(SearchResponse.Clusters clusters, SearchRequestContext searchRequestContext) { // if the search is only across remote clusters, none of them are available, and all of them have skip_unavailable set to true, // we end up calling merge without anything to merge, we just return an empty search response if (searchResponses.size() == 0) { @@ -236,7 +236,7 @@ SearchResponse getMergedResponse(SearchResponse.Clusters clusters) { successfulShards, skippedShards, tookInMillis, - searchTimeProvider.getPhaseTook(), + searchRequestContext.getPhaseTook(), shardFailures, clusters, null diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 05f4308df74fa..842c10b700d24 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -98,7 +98,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -154,20 +153,12 @@ public class TransportSearchAction extends HandledTransportAction SEARCH_REQUEST_STATS_ENABLED = Setting.boolSetting( - SEARCH_REQUEST_STATS_ENABLED_KEY, - false, - Property.Dynamic, - Property.NodeScope - ); - public static final String SEARCH_PHASE_TOOK_ENABLED_KEY = "search.phase_took_enabled"; public static final Setting SEARCH_PHASE_TOOK_ENABLED = Setting.boolSetting( SEARCH_PHASE_TOOK_ENABLED_KEY, false, - Property.Dynamic, - Property.NodeScope + Setting.Property.Dynamic, + Setting.Property.NodeScope ); private final NodeClient client; @@ -181,14 +172,10 @@ public class TransportSearchAction extends HandledTransportAction) SearchRequest::new); this.client = client; @@ -224,12 +210,9 @@ public TransportSearchAction( this.indexNameExpressionResolver = indexNameExpressionResolver; this.namedWriteableRegistry = namedWriteableRegistry; this.searchPipelineService = searchPipelineService; - this.isRequestStatsEnabled = clusterService.getClusterSettings().get(SEARCH_REQUEST_STATS_ENABLED); - clusterService.getClusterSettings().addSettingsUpdateConsumer(SEARCH_REQUEST_STATS_ENABLED, this::setIsRequestStatsEnabled); - this.searchRequestStats = searchRequestStats; - this.searchRequestSlowLog = searchRequestSlowLog; this.metricsRegistry = metricsRegistry; this.searchQueryMetricsEnabled = clusterService.getClusterSettings().get(SEARCH_QUERY_METRICS_ENABLED_SETTING); + this.searchRequestOperationsCompositeListenerFactory = searchRequestOperationsCompositeListenerFactory; clusterService.getClusterSettings() .addSettingsUpdateConsumer(SEARCH_QUERY_METRICS_ENABLED_SETTING, this::setSearchQueryMetricsEnabled); } @@ -241,10 +224,6 @@ private void setSearchQueryMetricsEnabled(boolean searchQueryMetricsEnabled) { } } - private void setIsRequestStatsEnabled(boolean isRequestStatsEnabled) { - this.isRequestStatsEnabled = isRequestStatsEnabled; - } - private Map buildPerIndexAliasFilter( SearchRequest request, ClusterState clusterState, @@ -289,8 +268,6 @@ private Map 
resolveIndexBoosts(SearchRequest searchRequest, Clust } /** - * Listener to track request-level tookTime and phase tookTimes from the coordinator. - * * Search operations need two clocks. One clock is to fulfill real clock needs (e.g., resolving * "now" to an index name). Another clock is needed for measuring how long a search operation * took. These two uses are at odds with each other. There are many issues with using a real @@ -300,12 +277,10 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * * @opensearch.internal */ - static final class SearchTimeProvider extends SearchRequestOperationsListener { - + static final class SearchTimeProvider { private final long absoluteStartMillis; private final long relativeStartNanos; private final LongSupplier relativeCurrentNanosProvider; - private boolean phaseTook = false; /** * Instantiates a new search time provider. The absolute start time is the real clock time @@ -331,43 +306,6 @@ long getAbsoluteStartMillis() { long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } - - public void setPhaseTook(boolean phaseTook) { - this.phaseTook = phaseTook; - } - - SearchResponse.PhaseTook getPhaseTook() { - if (phaseTook) { - Map phaseTookMap = new HashMap<>(); - // Convert Map to Map for SearchResponse() - for (SearchPhaseName searchPhaseName : phaseStatsMap.keySet()) { - phaseTookMap.put(searchPhaseName.getName(), phaseStatsMap.get(searchPhaseName)); - } - return new SearchResponse.PhaseTook(phaseTookMap); - } else { - return null; - } - } - - Map phaseStatsMap = new EnumMap<>(SearchPhaseName.class); - - @Override - void onPhaseStart(SearchPhaseContext context) {} - - @Override - void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) { - phaseStatsMap.put( - context.getCurrentPhase().getSearchPhaseName(), - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - context.getCurrentPhase().getStartTimeInNanos()) - ); - } - - @Override - void onPhaseFailure(SearchPhaseContext context) {} - - public Long getPhaseTookTime(SearchPhaseName searchPhaseName) { - return phaseStatsMap.get(searchPhaseName); - } } @Override @@ -490,11 +428,12 @@ private void executeRequest( relativeStartNanos, System::nanoTime ); - - final List searchListenersList = createSearchListenerList(originalSearchRequest, timeProvider); - SearchRequestContext searchRequestContext = new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger) - ); + if (originalSearchRequest.isPhaseTook() == null) { + originalSearchRequest.setPhaseTook(clusterService.getClusterSettings().get(SEARCH_PHASE_TOOK_ENABLED)); + } + SearchRequestOperationsListener.CompositeListener requestOperationsListeners = searchRequestOperationsCompositeListenerFactory + .buildCompositeListener(originalSearchRequest, logger); + SearchRequestContext searchRequestContext = new SearchRequestContext(requestOperationsListeners, originalSearchRequest); searchRequestContext.getSearchRequestOperationsListener().onRequestStart(searchRequestContext); PipelinedRequest searchRequest; @@ -599,7 +538,8 @@ private ActionListener buildRewriteListener( searchContext, searchAsyncActionProvider, searchRequestContext - ) + ), + searchRequestContext ); } else { AtomicInteger skippedClusters = new AtomicInteger(0); @@ -687,7 +627,8 @@ static void ccsRemoteReduce( RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener listener, - BiConsumer> 
localSearchConsumer + BiConsumer> localSearchConsumer, + SearchRequestContext searchRequestContext ) { if (localIndices == null && remoteIndices.size() == 1) { @@ -729,7 +670,7 @@ public void onResponse(SearchResponse searchResponse) { searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(), timeProvider.buildTookInMillis(), - timeProvider.getPhaseTook(), + searchRequestContext.getPhaseTook(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0), searchResponse.pointInTimeId() @@ -775,7 +716,8 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, totalClusters, - listener + listener, + searchRequestContext ); Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); remoteClusterClient.search(ccsSearchRequest, ccsListener); @@ -789,7 +731,8 @@ public void onFailure(Exception e) { exceptions, searchResponseMerger, totalClusters, - listener + listener, + searchRequestContext ); SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest( searchRequest, @@ -884,7 +827,8 @@ private static ActionListener createCCSListener( AtomicReference exceptions, SearchResponseMerger searchResponseMerger, int totalClusters, - ActionListener originalListener + ActionListener originalListener, + SearchRequestContext searchRequestContext ) { return new CCSActionListener( clusterAlias, @@ -906,7 +850,7 @@ SearchResponse createFinalResponse() { searchResponseMerger.numResponses(), skippedClusters.get() ); - return searchResponseMerger.getMergedResponse(clusters); + return searchResponseMerger.getMergedResponse(clusters, searchRequestContext); } }; } @@ -1244,35 +1188,6 @@ AbstractSearchAsyncAction asyncSearchAction( ); } - private List createSearchListenerList(SearchRequest searchRequest, SearchTimeProvider timeProvider) { - final List searchListenersList = new ArrayList<>(); - - if (isRequestStatsEnabled) { - searchListenersList.add(searchRequestStats); - } - - // phase_took is enabled with request param and/or cluster setting - Boolean phaseTookRequestParam = searchRequest.isPhaseTook(); - if (phaseTookRequestParam == null) { // check cluster setting only when request param is undefined - if (clusterService.getClusterSettings().get(TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED)) { - timeProvider.setPhaseTook(true); - searchListenersList.add(timeProvider); - } - } else if (phaseTookRequestParam == true) { - timeProvider.setPhaseTook(true); - searchListenersList.add(timeProvider); - } - - if (searchRequestSlowLog.getWarnThreshold() >= 0 - || searchRequestSlowLog.getInfoThreshold() >= 0 - || searchRequestSlowLog.getDebugThreshold() >= 0 - || searchRequestSlowLog.getTraceThreshold() >= 0) { - searchListenersList.add(searchRequestSlowLog); - } - - return searchListenersList; - } - private AbstractSearchAsyncAction searchAsyncAction( SearchTask task, SearchRequest searchRequest, diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fa4b0f475edc5..277286ae1ff1b 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -36,6 +36,7 @@ import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; import org.opensearch.action.search.CreatePitController; import org.opensearch.action.search.SearchRequestSlowLog; +import org.opensearch.action.search.SearchRequestStats; import 
org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.support.AutoCreateIndex; import org.opensearch.action.support.DestructiveOperations; @@ -380,9 +381,9 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS, TransportSearchAction.SHARD_COUNT_LIMIT_SETTING, TransportSearchAction.SEARCH_CANCEL_AFTER_TIME_INTERVAL_SETTING, - TransportSearchAction.SEARCH_REQUEST_STATS_ENABLED, - TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, TransportSearchAction.SEARCH_QUERY_METRICS_ENABLED_SETTING, + TransportSearchAction.SEARCH_PHASE_TOOK_ENABLED, + SearchRequestStats.SEARCH_REQUEST_STATS_ENABLED, RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE, SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER, RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 4cbf8dc191a9d..8510122c39fcb 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -46,6 +46,8 @@ import org.opensearch.action.admin.cluster.snapshots.status.TransportNodesSnapshotsStatus; import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; +import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory; +import org.opensearch.action.search.SearchRequestOperationsListener; import org.opensearch.action.search.SearchRequestSlowLog; import org.opensearch.action.search.SearchRequestStats; import org.opensearch.action.search.SearchTransportService; @@ -783,7 +785,7 @@ protected Node( threadPool ); - final SearchRequestStats searchRequestStats = new SearchRequestStats(); + final SearchRequestStats searchRequestStats = new SearchRequestStats(clusterService.getClusterSettings()); final SearchRequestSlowLog searchRequestSlowLog = new SearchRequestSlowLog(clusterService); remoteStoreStatsTrackerFactory = new RemoteStoreStatsTrackerFactory(clusterService, settings); @@ -879,6 +881,17 @@ protected Node( ) .collect(Collectors.toList()); + // register all standard SearchRequestOperationsCompositeListenerFactory to the SearchRequestOperationsCompositeListenerFactory + final SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = + new SearchRequestOperationsCompositeListenerFactory( + Stream.concat( + Stream.of(searchRequestStats, searchRequestSlowLog), + pluginComponents.stream() + .filter(p -> p instanceof SearchRequestOperationsListener) + .map(p -> (SearchRequestOperationsListener) p) + ).toArray(SearchRequestOperationsListener[]::new) + ); + ActionModule actionModule = new ActionModule( settings, clusterModule.getIndexNameExpressionResolver(), @@ -1275,6 +1288,7 @@ protected Node( b.bind(RemoteClusterStateService.class).toProvider(() -> remoteClusterStateService); b.bind(PersistedStateRegistry.class).toInstance(persistedStateRegistry); b.bind(SegmentReplicationStatsTracker.class).toInstance(segmentReplicationStatsTracker); + b.bind(SearchRequestOperationsCompositeListenerFactory.class).toInstance(searchRequestOperationsCompositeListenerFactory); }); injector = modules.createInjector(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index b8ab5c935fa34..a5ca08f141560 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -44,6 +44,8 @@ import org.opensearch.cluster.service.ClusterStateStats; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.metrics.OperationStats; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.indices.breaker.AllCircuitBreakerStats; @@ -961,7 +963,12 @@ public void apply(String action, AdmissionControlActionType admissionControlActi private static NodeIndicesStats getNodeIndicesStats(boolean remoteStoreStats) { NodeIndicesStats indicesStats = null; if (remoteStoreStats) { - indicesStats = new NodeIndicesStats(new CommonStats(CommonStatsFlags.ALL), new HashMap<>(), new SearchRequestStats()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + indicesStats = new NodeIndicesStats( + new CommonStats(CommonStatsFlags.ALL), + new HashMap<>(), + new SearchRequestStats(clusterSettings) + ); RemoteSegmentStats remoteSegmentStats = indicesStats.getSegments().getRemoteSegmentStats(); remoteSegmentStats.addUploadBytesStarted(10L); remoteSegmentStats.addUploadBytesSucceeded(10L); diff --git a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java index e17fbab32a12e..76129341fc9a2 100644 --- a/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/AbstractSearchAsyncActionTests.java @@ -32,12 +32,15 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.opensearch.action.OriginalIndices; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.set.Sets; @@ -175,7 +178,7 @@ private AbstractSearchAsyncAction createAction( results, request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { @Override protected SearchPhase getNextPhase(final SearchPhaseResults results, SearchPhaseContext context) { @@ -328,7 +331,8 @@ public void testSendSearchResponseDisallowPartialFailures() { } public void testOnPhaseFailureAndVerifyListeners() { - SearchRequestStats testListener = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); final List requestOperationListeners = new ArrayList<>(List.of(testListener)); SearchQueryThenFetchAsyncAction action = createSearchQueryThenFetchAsyncAction(requestOperationListeners); @@ -591,7 +595,8 @@ public 
void onFailure(Exception e) { } public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedException { - SearchRequestStats testListener = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); final List requestOperationListeners = new ArrayList<>(List.of(testListener)); long delay = (randomIntBetween(1, 5)); @@ -640,7 +645,8 @@ public void testOnPhaseListenersWithQueryAndThenFetchType() throws InterruptedEx } public void testOnPhaseListenersWithDfsType() throws InterruptedException { - SearchRequestStats testListener = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testListener = new SearchRequestStats(clusterSettings); final List requestOperationListeners = new ArrayList<>(List.of(testListener)); SearchDfsQueryThenFetchAsyncAction searchDfsQueryThenFetchAsyncAction = createSearchDfsQueryThenFetchAsyncAction( @@ -710,7 +716,10 @@ private SearchDfsQueryThenFetchAsyncAction createSearchDfsQueryThenFetchAsyncAct null, task, SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger)) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), + searchRequest + ) ); } @@ -760,7 +769,10 @@ private SearchQueryThenFetchAsyncAction createSearchQueryThenFetchAsyncAction( null, task, SearchResponse.Clusters.EMPTY, - new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger)) + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(searchRequestOperationsListeners, logger), + searchRequest + ) ) { @Override ShardSearchFailure[] buildShardFailures() { diff --git a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java index 4ed4797efe604..56dcf66d5607d 100644 --- a/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java +++ b/server/src/test/java/org/opensearch/action/search/CanMatchPreFilterSearchPhaseTests.java @@ -31,6 +31,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.BytesRef; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; @@ -137,7 +138,10 @@ public void run() throws IOException { } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); canMatchPhase.start(); @@ -229,7 +233,10 @@ public void run() throws IOException { } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); canMatchPhase.start(); @@ -320,7 +327,10 @@ public void sendCanMatch( new ArraySearchPhaseResults<>(iter.size()), randomIntBetween(1, 32), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + 
) ) { @Override @@ -348,7 +358,10 @@ protected void executePhaseOnShard( } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); canMatchPhase.start(); @@ -433,7 +446,10 @@ public void run() { } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); canMatchPhase.start(); @@ -533,7 +549,10 @@ public void run() { } }, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); canMatchPhase.start(); diff --git a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java index 7b4fa1d8387df..af7adc4e58fb8 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchAsyncActionTests.java @@ -31,6 +31,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.cluster.ClusterState; @@ -61,6 +62,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -136,7 +138,7 @@ public void testSkipSearchShards() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { @Override @@ -255,7 +257,7 @@ public void testLimitConcurrentShardRequests() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { @Override @@ -373,7 +375,7 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { TestSearchResponse response = new TestSearchResponse(); @@ -496,7 +498,7 @@ public void sendFreeContext(Transport.Connection connection, ShardSearchContextI new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { TestSearchResponse response = new TestSearchResponse(); @@ -610,7 +612,7 @@ public void testAllowPartialResults() throws InterruptedException { new ArraySearchPhaseResults<>(shardsIter.size()), request.getMaxConcurrentShardRequests(), SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new 
SearchRequestContext(new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), request) ) { @Override protected void executePhaseOnShard( diff --git a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java index a8c0c43ac5080..faf6f86c69c27 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchQueryThenFetchAsyncActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopFieldDocs; @@ -63,6 +64,7 @@ import org.opensearch.transport.Transport; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; @@ -215,7 +217,10 @@ public void sendExecuteQuery( null, task, SearchResponse.Clusters.EMPTY, - new SearchRequestContext() + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ) { @Override protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java new file mode 100644 index 0000000000000..78c5ba4412c68 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsCompositeListenerFactoryTests.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; + +public class SearchRequestOperationsCompositeListenerFactoryTests extends OpenSearchTestCase { + public void testAddAndGetListeners() { + SearchRequestOperationsListener testListener = createTestSearchRequestOperationsListener(); + SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory( + testListener + ); + assertEquals(1, requestListeners.getListeners().size()); + assertEquals(testListener, requestListeners.getListeners().get(0)); + } + + public void testStandardListenersEnabled() { + SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener(); + SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener(); + testListener1.setEnabled(false); + SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory( + testListener1, + testListener2 + ); + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener( + searchRequest, + logger + ); + List listeners = compositeListener.getListeners(); + assertEquals(1, listeners.size()); + assertEquals(testListener2, listeners.get(0)); + assertEquals(2, requestListeners.getListeners().size()); + assertEquals(testListener1, requestListeners.getListeners().get(0)); + assertEquals(testListener2, requestListeners.getListeners().get(1)); + } + + public void testStandardListenersAndPerRequestListener() { + SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener(); + SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory( + testListener1 + ); + SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener(); + testListener1.setEnabled(true); + testListener2.setEnabled(true); + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + searchRequest.setPhaseTook(true); + SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener( + searchRequest, + logger, + testListener2 + ); + List listeners = compositeListener.getListeners(); + assertEquals(2, listeners.size()); + assertEquals(testListener1, listeners.get(0)); + assertEquals(testListener2, listeners.get(1)); + assertEquals(1, requestListeners.getListeners().size()); + assertEquals(testListener1, requestListeners.getListeners().get(0)); + } + + public void testStandardListenersDisabledAndPerRequestListener() { + SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener(); + testListener1.setEnabled(false); + SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory( + testListener1 + ); + SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener(); + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest 
searchRequest = new SearchRequest().source(source); + SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener( + searchRequest, + logger, + testListener2 + ); + List listeners = compositeListener.getListeners(); + assertEquals(1, listeners.size()); + assertEquals(testListener2, listeners.get(0)); + assertEquals(1, requestListeners.getListeners().size()); + assertEquals(testListener1, requestListeners.getListeners().get(0)); + assertFalse(requestListeners.getListeners().get(0).isEnabled()); + } + + public void testStandardListenerAndPerRequestListenerDisabled() { + SearchRequestOperationsListener testListener1 = createTestSearchRequestOperationsListener(); + SearchRequestOperationsCompositeListenerFactory requestListeners = new SearchRequestOperationsCompositeListenerFactory( + testListener1 + ); + testListener1.setEnabled(true); + SearchRequestOperationsListener testListener2 = createTestSearchRequestOperationsListener(); + testListener2.setEnabled(false); + + SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); + SearchRequest searchRequest = new SearchRequest().source(source); + searchRequest.setPhaseTook(false); + SearchRequestOperationsListener.CompositeListener compositeListener = requestListeners.buildCompositeListener( + searchRequest, + logger, + testListener2 + ); + List listeners = compositeListener.getListeners(); + assertEquals(1, listeners.size()); + assertEquals(testListener1, listeners.get(0)); + assertEquals(1, requestListeners.getListeners().size()); + assertEquals(testListener1, requestListeners.getListeners().get(0)); + } + + public SearchRequestOperationsListener createTestSearchRequestOperationsListener() { + return new SearchRequestOperationsListener() { + @Override + void onPhaseStart(SearchPhaseContext context) {} + + @Override + void onPhaseEnd(SearchPhaseContext context, SearchRequestContext searchRequestContext) {} + + @Override + void onPhaseFailure(SearchPhaseContext context) {} + }; + } +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java index 58a4c4a4e555d..0f737e00478cb 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestOperationsListenerSupport.java @@ -8,6 +8,10 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; + +import java.util.List; + /** * Helper interface to access package protected {@link SearchRequestOperationsListener} from test cases. 
*/ @@ -17,6 +21,12 @@ default void onPhaseStart(SearchRequestOperationsListener listener, SearchPhaseC } default void onPhaseEnd(SearchRequestOperationsListener listener, SearchPhaseContext context) { - listener.onPhaseEnd(context, new SearchRequestContext()); + listener.onPhaseEnd( + context, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java index e23f08c9415eb..f009988ffae17 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestSlowLogTests.java @@ -104,6 +104,7 @@ public void testMultipleSlowLoggersUseSingleLog4jLogger() { new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null ); + SearchRequestOperationsCompositeListenerFactory searchRequestListeners = new SearchRequestOperationsCompositeListenerFactory(); SearchRequestSlowLog searchRequestSlowLog2 = new SearchRequestSlowLog(clusterService2); int numberOfLoggersAfter = context.getLoggers().size(); @@ -175,7 +176,8 @@ public void testConcurrentOnRequestEnd() throws InterruptedException { ArrayList searchRequestContexts = new ArrayList<>(); for (int i = 0; i < numRequests; i++) { SearchRequestContext searchRequestContext = new SearchRequestContext( - new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger) + new SearchRequestOperationsListener.CompositeListener(searchListenersList, logger), + searchRequest ); searchRequestContext.setAbsoluteStartNanos((i < numRequestsLogged) ? 
0 : System.nanoTime()); searchRequestContexts.add(searchRequestContext); @@ -204,7 +206,10 @@ public void testSearchRequestSlowLogHasJsonFields_EmptySearchRequestContext() th SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); SearchRequest searchRequest = new SearchRequest().source(source); SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); - SearchRequestContext searchRequestContext = new SearchRequestContext(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); SearchRequestSlowLog.SearchRequestSlowLogMessage p = new SearchRequestSlowLog.SearchRequestSlowLogMessage( searchPhaseContext, 10, @@ -225,7 +230,10 @@ public void testSearchRequestSlowLogHasJsonFields_NotEmptySearchRequestContext() SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); SearchRequest searchRequest = new SearchRequest().source(source); SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); - SearchRequestContext searchRequestContext = new SearchRequestContext(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); @@ -251,7 +259,10 @@ public void testSearchRequestSlowLogHasJsonFields_PartialContext() throws IOExce SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); SearchRequest searchRequest = new SearchRequest().source(source); SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); - SearchRequestContext searchRequestContext = new SearchRequestContext(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); @@ -277,7 +288,10 @@ public void testSearchRequestSlowLogSearchContextPrinterToLog() throws IOExcepti SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); SearchRequest searchRequest = new SearchRequest().source(source); SearchPhaseContext searchPhaseContext = new MockSearchPhaseContext(1, searchRequest); - SearchRequestContext searchRequestContext = new SearchRequestContext(); + SearchRequestContext searchRequestContext = new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ); searchRequestContext.updatePhaseTookMap(SearchPhaseName.FETCH.getName(), 10L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.QUERY.getName(), 50L); searchRequestContext.updatePhaseTookMap(SearchPhaseName.EXPAND.getName(), 5L); diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java index 
93cf77933fdd5..377ccebbfd418 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestStatsTests.java @@ -8,9 +8,13 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Phaser; @@ -22,7 +26,8 @@ public class SearchRequestStatsTests extends OpenSearchTestCase { public void testSearchRequestPhaseFailure() { - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); SearchPhaseContext ctx = mock(SearchPhaseContext.class); SearchPhase mockSearchPhase = mock(SearchPhase.class); when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); @@ -37,7 +42,8 @@ public void testSearchRequestPhaseFailure() { } public void testSearchRequestStats() { - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); SearchPhaseContext ctx = mock(SearchPhaseContext.class); SearchPhase mockSearchPhase = mock(SearchPhase.class); @@ -50,7 +56,13 @@ public void testSearchRequestStats() { long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); assertEquals(1, testRequestStats.getPhaseCurrent(searchPhaseName)); - testRequestStats.onPhaseEnd(ctx, new SearchRequestContext()); + testRequestStats.onPhaseEnd( + ctx, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(0, testRequestStats.getPhaseCurrent(searchPhaseName)); assertEquals(1, testRequestStats.getPhaseTotal(searchPhaseName)); assertThat(testRequestStats.getPhaseMetric(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); @@ -58,7 +70,8 @@ public void testSearchRequestStats() { } public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedException { - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); int numTasks = randomIntBetween(5, 50); Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); @@ -85,7 +98,8 @@ public void testSearchRequestStatsOnPhaseStartConcurrently() throws InterruptedE } public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedException { - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); int numTasks = randomIntBetween(5, 50); Thread[] threads = new 
Thread[numTasks * SearchPhaseName.values().length]; Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); @@ -102,7 +116,13 @@ public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedExc for (int i = 0; i < numTasks; i++) { threads[i] = new Thread(() -> { phaser.arriveAndAwaitAdvance(); - testRequestStats.onPhaseEnd(ctx, new SearchRequestContext()); + testRequestStats.onPhaseEnd( + ctx, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); countDownLatch.countDown(); }); threads[i].start(); @@ -121,7 +141,8 @@ public void testSearchRequestStatsOnPhaseEndConcurrently() throws InterruptedExc } public void testSearchRequestStatsOnPhaseFailureConcurrently() throws InterruptedException { - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); int numTasks = randomIntBetween(5, 50); Thread[] threads = new Thread[numTasks * SearchPhaseName.values().length]; Phaser phaser = new Phaser(numTasks * SearchPhaseName.values().length + 1); diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java index 1004965c0d50e..ce4d5ca4f7091 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseMergerTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; @@ -132,7 +133,13 @@ public void testMergeTookInMillis() throws InterruptedException { addResponse(merger, searchResponse); } awaitResponsesAdded(); - SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); + SearchResponse searchResponse = merger.getMergedResponse( + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); } @@ -184,7 +191,13 @@ public void testMergeShardFailures() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -235,7 +248,13 @@ public void testMergeShardFailuresNullShardTarget() throws InterruptedException awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = 
merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -281,7 +300,13 @@ public void testMergeShardFailuresNullShardId() throws InterruptedException { } awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); - ShardSearchFailure[] shardFailures = merger.getMergedResponse(SearchResponse.Clusters.EMPTY).getShardFailures(); + ShardSearchFailure[] shardFailures = merger.getMergedResponse( + SearchResponse.Clusters.EMPTY, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ).getShardFailures(); assertThat(Arrays.asList(shardFailures), containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY))); } @@ -315,7 +340,13 @@ public void testMergeProfileResults() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, merger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -377,7 +408,13 @@ public void testMergeCompletionSuggestions() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -449,7 +486,13 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -523,7 +566,13 @@ public void testMergeAggs() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = 
searchResponseMerger.getMergedResponse(clusters); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, mergedResponse.getClusters()); assertEquals(numResponses, mergedResponse.getTotalShards()); assertEquals(numResponses, mergedResponse.getSuccessfulShards()); @@ -680,7 +729,13 @@ public void testMergeSearchHits() throws InterruptedException { awaitResponsesAdded(); assertEquals(numResponses, searchResponseMerger.numResponses()); final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse searchResponse = searchResponseMerger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); assertEquals(expectedTotal, searchResponse.getTotalShards()); @@ -740,7 +795,13 @@ public void testMergeNoResponsesAdded() { SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder()); SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); assertEquals(0, merger.numResponses()); - SearchResponse response = merger.getMergedResponse(clusters); + SearchResponse response = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertSame(clusters, response.getClusters()); assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), response.getTook().millis()); assertEquals(0, response.getTotalShards()); @@ -813,7 +874,13 @@ public void testMergeEmptySearchHitsWithNonEmpty() { merger.add(searchResponse); } assertEquals(2, merger.numResponses()); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(10, mergedResponse.getHits().getTotalHits().value); assertEquals(10, mergedResponse.getHits().getHits().length); assertEquals(2, mergedResponse.getTotalShards()); @@ -855,7 +922,13 @@ public void testMergeOnlyEmptyHits() { ); merger.add(searchResponse); } - SearchResponse mergedResponse = merger.getMergedResponse(clusters); + SearchResponse mergedResponse = merger.getMergedResponse( + clusters, + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + new SearchRequest() + ) + ); assertEquals(expectedTotalHits, mergedResponse.getHits().getTotalHits()); } diff --git a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java b/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java deleted file mode 100644 index 4d8a44417a3ee..0000000000000 --- a/server/src/test/java/org/opensearch/action/search/SearchTimeProviderTests.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 
license or a - * compatible open source license. - */ - -package org.opensearch.action.search; - -import org.opensearch.test.OpenSearchTestCase; - -import java.util.concurrent.TimeUnit; - -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class SearchTimeProviderTests extends OpenSearchTestCase { - - public void testSearchTimeProviderPhaseFailure() { - TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - SearchPhaseContext ctx = mock(SearchPhaseContext.class); - SearchPhase mockSearchPhase = mock(SearchPhase.class); - when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); - - for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); - testTimeProvider.onPhaseStart(ctx); - assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); - testTimeProvider.onPhaseFailure(ctx); - assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); - } - } - - public void testSearchTimeProviderPhaseEnd() { - TransportSearchAction.SearchTimeProvider testTimeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0); - - SearchPhaseContext ctx = mock(SearchPhaseContext.class); - SearchPhase mockSearchPhase = mock(SearchPhase.class); - when(ctx.getCurrentPhase()).thenReturn(mockSearchPhase); - - for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { - when(mockSearchPhase.getSearchPhaseName()).thenReturn(searchPhaseName); - long tookTimeInMillis = randomIntBetween(1, 100); - testTimeProvider.onPhaseStart(ctx); - long startTime = System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(tookTimeInMillis); - when(mockSearchPhase.getStartTimeInNanos()).thenReturn(startTime); - assertNull(testTimeProvider.getPhaseTookTime(searchPhaseName)); - testTimeProvider.onPhaseEnd(ctx, new SearchRequestContext()); - assertThat(testTimeProvider.getPhaseTookTime(searchPhaseName), greaterThanOrEqualTo(tookTimeInMillis)); - } - } -} diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index c4bf8a5d87172..da19c839f3826 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -32,6 +32,7 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.search.TotalHits; import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; @@ -483,7 +484,11 @@ public void testCCSRemoteReduceMergeFails() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -541,7 +546,11 @@ public void testCCSRemoteReduce() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -578,7 
+587,11 @@ public void testCCSRemoteReduce() throws Exception { remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -636,7 +649,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -676,7 +693,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); @@ -727,7 +748,11 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteClusterService, threadPool, listener, - (r, l) -> setOnce.set(Tuple.tuple(r, l)) + (r, l) -> setOnce.set(Tuple.tuple(r, l)), + new SearchRequestContext( + new SearchRequestOperationsListener.CompositeListener(List.of(), LogManager.getLogger()), + searchRequest + ) ); if (localIndices == null) { assertNull(setOnce.get()); diff --git a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java index 52b272094cd86..5656b77445772 100644 --- a/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java +++ b/server/src/test/java/org/opensearch/index/search/stats/SearchStatsTests.java @@ -37,6 +37,8 @@ import org.opensearch.action.search.SearchPhaseName; import org.opensearch.action.search.SearchRequestOperationsListenerSupport; import org.opensearch.action.search.SearchRequestStats; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.index.search.stats.SearchStats.Stats; import org.opensearch.test.OpenSearchTestCase; @@ -77,7 +79,8 @@ public void testShardLevelSearchGroupStats() throws Exception { long paramValue = randomIntBetween(2, 50); // Testing for request stats - SearchRequestStats testRequestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats testRequestStats = new SearchRequestStats(clusterSettings); SearchPhaseContext ctx = mock(SearchPhaseContext.class); for (SearchPhaseName searchPhaseName : SearchPhaseName.values()) { SearchPhase mockSearchPhase = mock(SearchPhase.class); diff --git a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java index 6f36d22b7e17b..2424e38636466 100644 --- a/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java +++ b/server/src/test/java/org/opensearch/indices/NodeIndicesStatsTests.java @@ -34,6 +34,8 @@ import org.opensearch.action.admin.indices.stats.CommonStats; import 
org.opensearch.action.search.SearchRequestStats; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.test.OpenSearchTestCase; @@ -46,7 +48,8 @@ public class NodeIndicesStatsTests extends OpenSearchTestCase { public void testInvalidLevel() { CommonStats oldStats = new CommonStats(); - SearchRequestStats requestStats = new SearchRequestStats(); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + SearchRequestStats requestStats = new SearchRequestStats(clusterSettings); final NodeIndicesStats stats = new NodeIndicesStats(oldStats, Collections.emptyMap(), requestStats); final String level = randomAlphaOfLength(16); final ToXContent.Params params = new ToXContent.MapParams(Collections.singletonMap("level", level)); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 9fe1f8294fc74..9bb1f51c51cf6 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -90,7 +90,7 @@ import org.opensearch.action.search.SearchExecutionStatsCollector; import org.opensearch.action.search.SearchPhaseController; import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchRequestSlowLog; +import org.opensearch.action.search.SearchRequestOperationsCompositeListenerFactory; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchTransportService; import org.opensearch.action.search.TransportSearchAction; @@ -2285,6 +2285,8 @@ public void onFailure(final Exception e) { writableRegistry(), searchService::aggReduceContextBuilder ); + SearchRequestOperationsCompositeListenerFactory searchRequestOperationsCompositeListenerFactory = + new SearchRequestOperationsCompositeListenerFactory(); actions.put( SearchAction.INSTANCE, new TransportSearchAction( @@ -2310,9 +2312,8 @@ public void onFailure(final Exception e) { List.of(), client ), - null, - new SearchRequestSlowLog(clusterService), - NoopMetricsRegistry.INSTANCE + NoopMetricsRegistry.INSTANCE, + searchRequestOperationsCompositeListenerFactory ) ); actions.put( From a90b6b64d5fbde8c13b6acca076584e8060c599a Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Fri, 12 Jan 2024 05:30:55 +0800 Subject: [PATCH 67/81] Fix noop_update_total metric in indexing stats cannot be updated by bulk API (#11485) Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../test/indices.stats/50_noop_update.yml | 55 +++++++++++++++++++ .../action/bulk/BulkWithUpdatesIT.java | 8 +++ .../action/bulk/TransportShardBulkAction.java | 1 + 4 files changed, 65 insertions(+) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f64dcf82bd4d..1edb8560357cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -216,6 +216,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix remote shards balancer and remove unused variables ([#11167](https://github.com/opensearch-project/OpenSearch/pull/11167)) - Fix parsing of flat object fields with dots in keys ([#11425](https://github.com/opensearch-project/OpenSearch/pull/11425)) - Fix bug where replication lag grows post primary relocation 
([#11238](https://github.com/opensearch-project/OpenSearch/pull/11238)) +- Fix noop_update_total metric in indexing stats cannot be updated by bulk API ([#11485](https://github.com/opensearch-project/OpenSearch/pull/11485)) - Fix for stuck update action in a bulk with `retry_on_conflict` property ([#11152](https://github.com/opensearch-project/OpenSearch/issues/11152)) - Fix template setting override for replication type ([#11417](https://github.com/opensearch-project/OpenSearch/pull/11417)) - Fix Automatic addition of protocol broken in #11512 ([#11609](https://github.com/opensearch-project/OpenSearch/pull/11609)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml new file mode 100644 index 0000000000000..dd8c2a2deb721 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/50_noop_update.yml @@ -0,0 +1,55 @@ +--- +setup: + + - do: + indices.create: + index: test1 + wait_for_active_shards: all + body: + settings: + index.number_of_shards: 1 + index.number_of_replicas: 1 + + - do: + index: + index: test1 + id: 1 + body: { "bar": "bar" } + + - do: + indices.refresh: {} + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/9857 +--- +"Test noop_update_total metric can be updated by both update API and bulk API": + - skip: + version: " - 2.99.99" #TODO: change to 2.11.99 after the PR is backported to 2.x branch + reason: "fixed in 3.0" + + - do: + update: + index: test1 + id: 1 + body: { "doc": { "bar": "bar" } } + + - do: + indices.stats: + index: test1 + metric: indexing + + - match: { indices.test1.primaries.indexing.noop_update_total: 1 } + - match: { indices.test1.total.indexing.noop_update_total: 1 } + + - do: + bulk: + body: | + {"update": {"_id": "1", "_index": "test1"}} + {"doc": {"bar": "bar"}} + + - do: + indices.stats: + index: test1 + metric: indexing + + - match: { indices.test1.primaries.indexing.noop_update_total: 2 } + - match: { indices.test1.total.indexing.noop_update_total: 2 } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java index d7fb632c847d1..e27c0c4786da8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkWithUpdatesIT.java @@ -35,6 +35,8 @@ import org.opensearch.action.DocWriteRequest.OpType; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.indices.alias.Alias; +import org.opensearch.action.admin.indices.stats.IndicesStatsRequest; +import org.opensearch.action.admin.indices.stats.IndicesStatsResponse; import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.get.GetResponse; import org.opensearch.action.index.IndexRequest; @@ -738,6 +740,12 @@ public void testNoopUpdate() { equalTo(2) ); + // test noop_update_total metric in stats changed + IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest().indices(indexName).indexing(true); + final IndicesStatsResponse indicesStatsResponse = client().admin().indices().stats(indicesStatsRequest).actionGet(); + assertThat(indicesStatsResponse.getIndex(indexName).getTotal().indexing.getTotal().getNoopUpdateCount(), equalTo(1L)); + 
assertThat(indicesStatsResponse.getIndex(indexName).getPrimaries().indexing.getTotal().getNoopUpdateCount(), equalTo(1L)); + final BulkItemResponse notFoundUpdate = bulkResponse.getItems()[1]; assertNotNull(notFoundUpdate.getFailure()); diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index b8517f53ff294..a7a13afd2597c 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -593,6 +593,7 @@ static boolean executeBulkItemRequest( context.setRequestToExecute(updateResult.action()); break; case NOOP: + context.getPrimary().noopUpdate(); context.markOperationAsNoOp(updateResult.action()); context.markAsCompleted(context.getExecutionResult()); return true; From 5c82ab885a876d659c9714c3b080488777506027 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 11 Jan 2024 17:48:15 -0500 Subject: [PATCH 68/81] Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings (#11811) * Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --------- Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 4 +- .../licenses/jackson-core-2.16.0.jar.sha1 | 1 - .../licenses/jackson-core-2.16.1.jar.sha1 | 1 + .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 + .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../licenses/jackson-core-2.16.0.jar.sha1 | 1 - .../licenses/jackson-core-2.16.1.jar.sha1 | 1 + .../licenses/jackson-core-2.16.0.jar.sha1 | 1 - .../licenses/jackson-core-2.16.1.jar.sha1 | 1 + .../jackson-dataformat-cbor-2.16.0.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.16.1.jar.sha1 | 1 + .../jackson-dataformat-smile-2.16.0.jar.sha1 | 1 - .../jackson-dataformat-smile-2.16.1.jar.sha1 | 1 + .../jackson-dataformat-yaml-2.16.0.jar.sha1 | 1 - .../jackson-dataformat-yaml-2.16.1.jar.sha1 | 1 + .../common/xcontent/XContentContraints.java | 35 +++ .../common/xcontent/cbor/CborXContent.java | 17 +- .../common/xcontent/json/JsonXContent.java | 17 +- .../common/xcontent/smile/SmileXContent.java | 17 +- .../common/xcontent/yaml/YamlXContent.java | 17 +- .../common/xcontent/XContentParserTests.java | 232 ++++++++++++++++++ .../common/xcontent/depth-off-limit.cbor.gz | Bin 0 -> 1888 bytes .../common/xcontent/depth-off-limit.json.gz | Bin 0 -> 2045 bytes .../common/xcontent/depth-off-limit.smile.gz | Bin 0 -> 1906 bytes .../common/xcontent/depth-off-limit.yaml.gz | Bin 0 -> 5950 bytes .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 + .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 + .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 + .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 
+ .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../jackson-dataformat-xml-2.16.0.jar.sha1 | 1 - .../jackson-dataformat-xml-2.16.1.jar.sha1 | 1 + .../jackson-datatype-jsr310-2.16.0.jar.sha1 | 1 - .../jackson-datatype-jsr310-2.16.1.jar.sha1 | 1 + ...on-module-jaxb-annotations-2.16.0.jar.sha1 | 1 - ...on-module-jaxb-annotations-2.16.1.jar.sha1 | 1 + .../jackson-annotations-2.16.0.jar.sha1 | 1 - .../jackson-annotations-2.16.1.jar.sha1 | 1 + .../licenses/jackson-databind-2.16.0.jar.sha1 | 1 - .../licenses/jackson-databind-2.16.1.jar.sha1 | 1 + .../index/mapper/MapperService.java | 35 ++- .../index/mapper/MapperServiceTests.java | 42 ++++ 56 files changed, 411 insertions(+), 48 deletions(-) delete mode 100644 client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 create mode 100644 client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 delete mode 100644 libs/core/licenses/jackson-core-2.16.0.jar.sha1 create mode 100644 libs/core/licenses/jackson-core-2.16.1.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 create mode 100644 libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java create mode 100644 libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz create mode 100644 libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.json.gz create mode 100644 libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz create mode 100644 libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 
plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1edb8560357cf..10338f6646053 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -194,6 +194,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Capture information for additional query types and aggregation types ([#11582](https://github.com/opensearch-project/OpenSearch/pull/11582)) - Use slice_size == shard_size heuristic in terms aggs for concurrent segment search and properly calculate the doc_count_error ([#11732](https://github.com/opensearch-project/OpenSearch/pull/11732)) - Added Support for dynamically adding SearchRequestOperationsListeners with SearchRequestOperationsCompositeListenerFactory ([#11526](https://github.com/opensearch-project/OpenSearch/pull/11526)) +- Ensure Jackson default maximums introduced in 2.16.0 do not conflict with OpenSearch settings ([#11890](https://github.com/opensearch-project/OpenSearch/pull/11890)) ### Deprecated diff --git a/buildSrc/version.properties b/buildSrc/version.properties index eceb08c05953d..3813750507f18 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -7,8 +7,8 @@ bundled_jdk = 21.0.1+12 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.16.0 -jackson_databind = 2.16.0 +jackson = 2.16.1 +jackson_databind = 2.16.1 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 diff --git a/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 deleted file mode 100644 index c2b70fb4ae202..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..908d071b34a2a --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.16.1.jar.sha1 @@ -0,0 +1 @@ +9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 
79ed9e0c63fc8..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.16.0.jar.sha1 b/libs/core/licenses/jackson-core-2.16.0.jar.sha1 deleted file mode 100644 index c2b70fb4ae202..0000000000000 --- a/libs/core/licenses/jackson-core-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.16.1.jar.sha1 b/libs/core/licenses/jackson-core-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..908d071b34a2a --- /dev/null +++ b/libs/core/licenses/jackson-core-2.16.1.jar.sha1 @@ -0,0 +1 @@ +9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 deleted file mode 100644 index c2b70fb4ae202..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -899e5cf01be55fbf094ad72b2edb0c5df99111ee \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..908d071b34a2a --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.16.1.jar.sha1 @@ -0,0 +1 @@ +9456bb3cdd0f79f91a5f730a1b1bb041a380c91f \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 deleted file mode 100644 index 8da478fc6013d..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -35e8b7bf4fc1d078766bb155103d433ed5bb1627 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..b4b781f604910 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.16.1.jar.sha1 @@ -0,0 +1 @@ +1be7098dccc079171464dca7e386bd8df623b031 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 
b/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 deleted file mode 100644 index 3e952ffe92418..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3c422d7f3901c9a1becf9df3cf41efc68a5ab95c \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..ad91e748ebe94 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.16.1.jar.sha1 @@ -0,0 +1 @@ +c4ddbc5277670f2e56b1f5e44e83afa748bcb125 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 deleted file mode 100644 index d62b5874ab023..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2033e2c5f531785d17f3a2bc31842e3bbb7983b2 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..9b30e7bf921b2 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.16.1.jar.sha1 @@ -0,0 +1 @@ +8e4f1923d73cd55f2b4c0d56ee4ed80419297354 \ No newline at end of file diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java new file mode 100644 index 0000000000000..4c05f0058f2ed --- /dev/null +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentContraints.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.xcontent; + +import com.fasterxml.jackson.core.StreamReadConstraints; + +import org.opensearch.common.annotation.InternalApi; + +/** + * Consolidates the XContent constraints (primarily reflecting Jackson's {@link StreamReadConstraints} constraints) + * + * @opensearch.internal + */ +@InternalApi +public interface XContentContraints { + final String DEFAULT_MAX_STRING_LEN_PROPERTY = "opensearch.xcontent.string.length.max"; + final String DEFAULT_MAX_NAME_LEN_PROPERTY = "opensearch.xcontent.name.length.max"; + final String DEFAULT_MAX_DEPTH_PROPERTY = "opensearch.xcontent.depth.max"; + + final int DEFAULT_MAX_STRING_LEN = Integer.parseInt(System.getProperty(DEFAULT_MAX_STRING_LEN_PROPERTY, "50000000" /* ~50 Mb */)); + + final int DEFAULT_MAX_NAME_LEN = Integer.parseInt( + System.getProperty(DEFAULT_MAX_NAME_LEN_PROPERTY, "50000" /* StreamReadConstraints.DEFAULT_MAX_NAME_LEN */) + ); + + final int DEFAULT_MAX_DEPTH = Integer.parseInt( + System.getProperty(DEFAULT_MAX_DEPTH_PROPERTY, "1000" /* StreamReadConstraints.DEFAULT_MAX_DEPTH */) + ); +} diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java index 81f8fe9b6366f..7e92f236213d4 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/cbor/CborXContent.java @@ -37,8 +37,10 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -58,11 +60,7 @@ /** * A CBOR based content implementation using Jackson. 
*/ -public class CborXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class CborXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(cborXContent); } @@ -76,7 +74,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); cborFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - cborFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + cborFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + cborFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); cborFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); cborXContent = new CborXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java index 4bd7c4c99bb46..91f6bbeb4f786 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/json/JsonXContent.java @@ -38,7 +38,9 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -57,11 +59,7 @@ /** * A JSON based content implementation using Jackson. 
*/ -public class JsonXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class JsonXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(jsonXContent); } @@ -78,7 +76,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - jsonFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + jsonFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + jsonFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); jsonFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); jsonXContent = new JsonXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java index e824d4e1ae991..c73e126102a80 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/smile/SmileXContent.java @@ -37,9 +37,11 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.smile.SmileFactory; import com.fasterxml.jackson.dataformat.smile.SmileGenerator; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -58,11 +60,7 @@ /** * A Smile based content implementation using Jackson. 
*/ -public class SmileXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class SmileXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(smileXContent); } @@ -78,7 +76,14 @@ public static XContentBuilder contentBuilder() throws IOException { // Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); smileFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - smileFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + smileFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + smileFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); smileFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); smileXContent = new SmileXContent(); } diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java index 0ad3c44e0168a..3f6a4b3aeead7 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/yaml/YamlXContent.java @@ -36,8 +36,10 @@ import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.StreamReadConstraints; import com.fasterxml.jackson.core.StreamReadFeature; +import com.fasterxml.jackson.core.StreamWriteConstraints; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.MediaType; @@ -56,11 +58,7 @@ /** * A YAML based content implementation using Jackson. 
*/ -public class YamlXContent implements XContent { - public static final int DEFAULT_MAX_STRING_LEN = Integer.parseInt( - System.getProperty("opensearch.xcontent.string.length.max", "50000000" /* ~50 Mb */) - ); - +public class YamlXContent implements XContent, XContentContraints { public static XContentBuilder contentBuilder() throws IOException { return XContentBuilder.builder(yamlXContent); } @@ -71,7 +69,14 @@ public static XContentBuilder contentBuilder() throws IOException { static { yamlFactory = new YAMLFactory(); yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true); - yamlFactory.setStreamReadConstraints(StreamReadConstraints.builder().maxStringLength(DEFAULT_MAX_STRING_LEN).build()); + yamlFactory.setStreamWriteConstraints(StreamWriteConstraints.builder().maxNestingDepth(DEFAULT_MAX_DEPTH).build()); + yamlFactory.setStreamReadConstraints( + StreamReadConstraints.builder() + .maxStringLength(DEFAULT_MAX_STRING_LEN) + .maxNameLength(DEFAULT_MAX_NAME_LEN) + .maxNestingDepth(DEFAULT_MAX_DEPTH) + .build() + ); yamlFactory.configure(StreamReadFeature.USE_FAST_DOUBLE_PARSER.mappedFeature(), true); yamlXContent = new YamlXContent(); } diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java index d3d9ea174cf1b..0e431d8ea4277 100644 --- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/XContentParserTests.java @@ -40,6 +40,7 @@ import org.opensearch.common.xcontent.cbor.CborXContent; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.common.xcontent.smile.SmileXContent; +import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; @@ -48,16 +49,20 @@ import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.io.InputStream; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.function.Supplier; +import java.util.zip.GZIPInputStream; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -67,6 +72,7 @@ import static org.hamcrest.Matchers.in; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.nullValue; +import static org.junit.Assume.assumeThat; import static org.junit.internal.matchers.ThrowableMessageMatcher.hasMessage; public class XContentParserTests extends OpenSearchTestCase { @@ -94,6 +100,50 @@ public class XContentParserTests extends OpenSearchTestCase { () -> randomRealisticUnicodeOfCodepointLength(3145730) ); + private static final Map> FIELD_NAME_GENERATORS = Map.of( + XContentType.JSON, + () -> randomAlphaOfLengthBetween(1, JsonXContent.DEFAULT_MAX_NAME_LEN), + XContentType.CBOR, + () -> randomAlphaOfLengthBetween(1, CborXContent.DEFAULT_MAX_NAME_LEN), + XContentType.SMILE, + () -> randomAlphaOfLengthBetween(1, SmileXContent.DEFAULT_MAX_NAME_LEN), + XContentType.YAML, + () -> 
randomAlphaOfLengthBetween(1, YamlXContent.DEFAULT_MAX_NAME_LEN) + ); + + private static final Map> FIELD_NAME_OFF_LIMIT_GENERATORS = Map.of( + XContentType.JSON, + () -> randomAlphaOfLength(JsonXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.CBOR, + () -> randomAlphaOfLength(CborXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.SMILE, + () -> randomAlphaOfLength(SmileXContent.DEFAULT_MAX_NAME_LEN + 1), + XContentType.YAML, + () -> randomAlphaOfLength(YamlXContent.DEFAULT_MAX_NAME_LEN + 1) + ); + + private static final Map> DEPTH_GENERATORS = Map.of( + XContentType.JSON, + () -> randomIntBetween(1, JsonXContent.DEFAULT_MAX_DEPTH), + XContentType.CBOR, + () -> randomIntBetween(1, CborXContent.DEFAULT_MAX_DEPTH), + XContentType.SMILE, + () -> randomIntBetween(1, SmileXContent.DEFAULT_MAX_DEPTH), + XContentType.YAML, + () -> randomIntBetween(1, YamlXContent.DEFAULT_MAX_DEPTH) + ); + + private static final Map> OFF_LIMIT_DEPTH_GENERATORS = Map.of( + XContentType.JSON, + () -> JsonXContent.DEFAULT_MAX_DEPTH + 1, + XContentType.CBOR, + () -> CborXContent.DEFAULT_MAX_DEPTH + 1, + XContentType.SMILE, + () -> SmileXContent.DEFAULT_MAX_DEPTH + 1, + XContentType.YAML, + () -> YamlXContent.DEFAULT_MAX_DEPTH + 1 + ); + public void testStringOffLimit() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); @@ -155,6 +205,188 @@ public void testString() throws IOException { } } + public void testFieldNameOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = FIELD_NAME_OFF_LIMIT_GENERATORS.get(xContentType).get(); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + // See please https://github.com/FasterXML/jackson-dataformats-binary/issues/392, support + // for CBOR, Smile is coming + if (xContentType != XContentType.JSON) { + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } else { + assertThrows(StreamConstraintsException.class, () -> parser.nextToken()); + } + } + } + } + + public void testFieldName() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = FIELD_NAME_GENERATORS.get(xContentType).get(); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + 
assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + } + + public void testWriteDepthOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + // Branching off YAML logic into separate test case testWriteDepthOffLimitYaml since it behaves differently + assumeThat(xContentType, not(XContentType.YAML)); + + final String field = randomAlphaOfLengthBetween(1, 5); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(xContentType).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + // The behavior here is very interesting: the generator does write the new object tag (changing the internal state) + // BUT throws the exception after the fact, this is why we have to close the object at the end. + assertThrows(StreamConstraintsException.class, () -> builder.startObject()); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + + builder.endObject(); + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.endObject(); + } + } + } + + public void testWriteDepthOffLimitYaml() throws IOException { + final String field = randomAlphaOfLengthBetween(1, 5); + try (XContentBuilder builder = XContentBuilder.builder(XContentType.YAML.xContent())) { + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(XContentType.YAML).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + // The behavior here is very interesting: the generator does write the new object tag (changing the internal state) + // BUT throws the exception after the fact, this is why we have to close the object at the end. + assertThrows(StreamConstraintsException.class, () -> builder.startObject()); + } catch (final IllegalStateException ex) { + // YAML parser is having really hard time recovering from StreamConstraintsException, the internal + // state seems to be completely messed up and the closing cleanly seems to be not feasible. + } + } + + public void testReadDepthOffLimit() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + final int maxDepth = OFF_LIMIT_DEPTH_GENERATORS.get(xContentType).get() - 1; + + // Since parser and generator use the same max depth constraints, we could not generate the content with off limits, + // using precreated test files instead. + try ( + InputStream in = new GZIPInputStream( + getDataInputStream("depth-off-limit." 
+ xContentType.name().toLowerCase(Locale.US) + ".gz") + ) + ) { + try (XContentParser parser = createParser(xContentType.xContent(), in)) { + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + } + + if (xContentType != XContentType.YAML) { + assertThrows(StreamConstraintsException.class, () -> parser.nextToken()); + } + } + } + } + + public void testDepth() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = randomAlphaOfLengthBetween(1, 5); + final String value = randomAlphaOfLengthBetween(1, 5); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + final int maxDepth = DEPTH_GENERATORS.get(xContentType).get() - 1; + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.startObject(); + builder.field(field + depth); + } + + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + for (int depth = 0; depth < maxDepth; ++depth) { + builder.endObject(); + } + + try (XContentParser parser = createParser(xContentType.xContent(), BytesReference.bytes(builder))) { + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field + depth, parser.currentName()); + } + + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_STRING, parser.nextToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + + for (int depth = 0; depth < maxDepth; ++depth) { + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + } + + assertNull(parser.nextToken()); + } + } + } + public void testFloat() throws IOException { final XContentType xContentType = randomFrom(XContentType.values()); diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.cbor.gz new file mode 100644 index 0000000000000000000000000000000000000000..88de7e590e7f0e785fb1462c78ddac6aed50e9df GIT binary patch literal 1888 zcmb2|=HS>^IX9JwIVH8ABtthpEloEkGdHtDFF7f{h~e$!-GV8{1zjDdm%X}vRmtQ6 z4~vJENs+H|x<%^?m1OC z^|RLV&~vLn0-LbFqWGu#PSsBRt@S$t{vlyER46|A_Wr}iA3pw2_`{+`#%?HalYd6oP3wO;RH(Zp{|Qt~p>%Zi$H>fn*F&Q*vSIU|~IF19)2(_I_= zv|3DxjMaURe*R0*k>p2`UcSH8I(<*NHU9YAoRmLi*?2tuv1$8k;eV!k*12eDh}m*T z8&XkFGxqs_;IHITRS<4-My{lv4NY@;wR-aleV6KjD2mKz*BrA&dh=s_`20iFx1yE| z2BhIYW?S@p;ruz@ukUc5LA~SDlRKPWgZdHGQ&9_Ggjd-kqKVEU_aAr<;S3R_;a%C^I67#S`{f?jw1EUTDTfiFMp4u#!@EX%p zv~JZVbkK@`Q$P|}=lkfRoZ3y7q%q|=LD2z1AO&nJ`kS&pA+j+@O-`0m0)7Mbg4caj zwFltH*Fis&KGU-0B?0#VnqRX_?UQ?Yb4u{F(q8fvd24_MV1lhh{gk|fsK($YrNgEi z+Bm=gAzy7R1CDwf?6Yjiy%A!9)KB7%u14(yR)b}-2QZb1kH%Ejk#TZFWP$o{H%u3j z+q9&)@!=Mtw0&l-4;QfjwawHwDx9DjDk;>45o{)RU>7%&2a#Iad-t;je;T6Ph77&* zJBjz$XTj1ubFj{e8g2l29cpr0PD>@yu0eBxgFnMN26*4N8MOtn*DBesF zFMeV{tB3BJwtd~kKg&C3xJ$hlYKeZ}oKR+^(rhM#m^d{?^wQq3P_KJv9mp15Hj(~j zABW`|?OWv-$AWLPEutWvim9eVpiuxQb6-j1%yJAOL6@kbKBali*oaK?U~-j<(#4R- z!!v0r)$b-~XB*=v?~YzfA{$-e62rPWyhnvSzh&8cRpBmmOjft+!U!(Nxk4W+dvW!^ zlv!dL+Mq*)f)6P8O4s#5*CfRwl2Gt7?$ug_$Bdo0f!(u);HDf7W%CLeP$r{BU=+fe 
zlPBx5*2hs_uI*$gF7<=wgtsRff40BvSQugLVwO8UU6-Qw?#E8vF=KmZ<`^$8uGV04 z%sV<$ie+6eok?iL^rP~CDDZ+Tj+1^xfsDFfVcajf5mRMqz=khMIP`&_-MM_4fRa3aQ>YHZ6|zSUiBM=oL;!@RUjS$Dj(PcvbmU$8%c zjgCcOf#u){*S{nTsM(! zQ@aCulR@*|8hbg!)0q=ZAsk<2@X3sAlgplqe@Zb%`n&gWXw8L*UfO)__m9MZoq<5H zY~M}vde?Pea&J-k>dqb}vSc zGE`n@Z*dF_XkryXC_^qq($}*mI+8`nmdNmaTDiBO6hnHIey+c)N|ZAJ3Bo+F)=?s OLC#3j&t4Dp@%bMjq~}5a literal 0 HcmV?d00001 diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.smile.gz new file mode 100644 index 0000000000000000000000000000000000000000..e248778b372539560625af3aba6b97a801a4094b GIT binary patch literal 1906 zcmb2|=HS>^IX9JwIVH8ABtthpEloEkGdHtDuQ)d|CzavtC2gTxPr+#|6h*V|M_!WfB(N9^Ui-i^8Ly8E8o9- zKl6Rg`@twZ-3tQJoo(VdD}q(W%n`Rw|#GGZ~xx*J@@_Xd)xQsSKkK;?6v>@dF#i+ zpFg&!+dj=aU(uItt0h0R|CIaF#h(IyuBZvI1981pw4k6>OT3WL$|+vRX!RB^By?&; z4+;vj)N+5iLT2jXqL98*K+ft-C@B2ts+g&(i$d3(3f-v{-g-B|{047X^N^uZ%y0UZ zO};x+sHE*Tbj!N$4izeG`_0_4*>{Hul~{g5wygc`P@xXLOZ>i}`iAWrxibFU?RSR~ zH@0m0-PH1%LxtM=Caz3>_w>7`-=%)vRDE*@alf1I{{QFc_5FYT=Kufky1xE@9T5Ke QzyCjDR=sb+133l;0Q8?wRsaA1 literal 0 HcmV?d00001 diff --git a/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz b/libs/x-content/src/test/resources/org/opensearch/common/xcontent/depth-off-limit.yaml.gz new file mode 100644 index 0000000000000000000000000000000000000000..3b36594482a6818c5d2f230aa862c21297f0cac8 GIT binary patch literal 5950 zcmWOAc{~$*90%~MEuy?(FXU*GTR_kMj}zpq9b3iTqUz#j_p^N%0~9uB{J`EY1Z zSP)Ss#wRQk%>S@`xQDq7a99qHr&uGC*uXiK?3jFX$Z-6)PVI2Ty$|?H5@VZps~2?i z#L!Q+?@F-TSe|6YZ#J$hGDqV#m%N8^He*&S-wbVK%i8AVXwK%*KkFL0n=!HLMY{13 zq%|?!cw*e9jc$DO%5sWseC+z~3~oZ0#b%x$YN%#IXMKKQuEjpSrDbVp##?n^Gffbl zKDwSKsBswGtQ7D%Gyc30=;~&ye-VV&WNZu>K#ejs=L9^a-3Dbfb-1qRlx4m5s{Tx= z%$N9s>p!k`bd?lgtJm5E^;3QSX@1m6z1&mP_#d`J5e0|Y93;(4*rNv#} zrvr)=5be%N>5b2UA)ceL%Wr5+tKUQ|C-Mz{;)3yKRw!5 zE%cljx*=>nOc`zdXQ?C{+q?czQ1EGR^ii|Wb<=~pROk^KzWR?{Ubw?3B}_OxBTnWn z3~`sjJ>ud`6FrCaocdH7PLZ0~FEgL9TkX?}l*v;z*JdYA^}NU@n>-{MFE6f}jCf_TB>&sUQoWd!q4;b1N$idD4mw|^6hHDxy}guM>PKE5jmuy~#mK}l z*Z+a8P?m=5enyRn>12(jX*A*E+~WAwE9)!ccE6%#5sF`U-_vw*Mp=qpn3fD(mY+?_ z)0$U-apB&P-~Z^e{P%8UJx*A%XtpV@*ma%llG9b+nytuLe8ZypLL|22Z=uB8V{Np0=n=B z-RDzfPjUw8&yGv}j;UPUCz3ihzMHI5dF_#T<=G}q;b?_ta2Iih?#rpSxjAbS-)sIu zEMeW(v&bhyBf8Cvjb8sQ+^6Ch-Idd!!wTr6DUMzbvUs`#&?|E?aq;DRF7SLr7= zdG@O(4rAuFjbC|Wc$;e6(RAYT(B;E>rtj7_Zxi*OUqr<&RwW&8rkI87X$@xxv-nJo z^54L?yXkK^X7`0JXycyMFBoP|?pK^K*R;cl$sOz<826Go1DV9|y8-OH;NgJJL&1jw zIVvHPz;7NQ9f4zcA!Lm^>7e3ejE(6UFUbZ}x`sAn*3D7N;TVf)>eafTh$FP|B9 zcf9O0?3;fnGW;&hR5uzv%sgo{ZqK}8^ec?{&}il^v)O2_+VaBnyybs9Qu0IVH`=Pg2Y`mdcgjHu$(U7L1a z^cRl3kohX#_Gu8~D2<-`fHg_3(p(gv>z?CjK7Q4Y4#C=2^KdNSG} z1Ab~w{q^nG;;O6J6sPk`&)BtfJ00#N_*3$&kA2BC`GCxcB;J;;S^qd_dP0mYydfcb zB!sV^;n)AG9P_oR&&8i{zNNIsss5Q`W9#LDqT6j9gO+E-@|PgJYS%n?s4|P`x5kXt z+C`lNEm+qYLRs3Jry6Lq{5FL#&R@Ke_zcEblTenm;$Z?UP^xRVr*<-oXWyH-_m$-L0%8xf?8 zW1rf%Z`7L986!)a6*-~Itp&02C2P3SvymO!E-^^)$1@+#7Hs!HRxaV+?|LrAD?}L* zSQ)#&WeU@8Mx!szFEr08~k9c`Bgx8l>OP#F}zccu5ifVe&H^W)#d=MnovQ+!k z?2qn9hn^75Z+_TbN{xDjw-kRn@WJ3!^gvoF-*GVaGFD)wj9XVhbbNFo<%~6ZC#O2TQ@;jBA>8c~XZ8_YwH({^zt={rzZ(QD+ zMJ!*=iq@4G#b>vDd~+?D@FY53M%I1@uXnt)eZt%fi}id|jT#I%$@PGR>Aq=l|U6>;@bi_cS6i zeRh{yQV+S<+C%+v@v{o(0peh}C-si}X>*ugfpf4bda>Af34AYoZ~?U+^P<-VcdxwU zGE~A|CsM&FlvG|#O_ay85cl&hkqwocqO;1sP$#(fYNRVYaMqadx2p>m@<1!kRJP1$ zxHk^>LA+EQUg2P3xZ5eFr(7jrkh{Qm2v^2qh5I;uv*jiUC$k;ZBn!8Hx0=71NPaDMf=*24-M93rutIRUnOJ|SJZjHEG5x?KTD?9?uALQlu#R_sUw 
z@@LoOlIaz}x6z;d&IZ7Jo>dO+K^ge;?XfC8KtA^r-l|pIv=3z*)~9F1POWSfw|4L} zbd)3hSCN!h%XJG=fETi7OV9#^bH@;^+Ao?8-m8(dAGrdDl8(CB{$*AX(?s~jY0 zk2-^y1crEZp(HK0C-^O)*0y>e$K6_xF8D~pFig9j+Y~YzzchJrUIlM3-q+UZ>f&`* z6>l`t*U?JkdL357AN{v)p!L3scZV9@bfs^+wSns$rjAzyy>qQ!H3^k8SuI)U>?&au z!|0cVCswao%1Oi}Lk$yf63AJ2LeV&G9?ky?U8W89x(PR=vGW*=1-WRIoi~EFCCDaN zQFilf1D)zovC68wN1)!xVvTp%IS^~so(S#ZWeM;Fqew0K4gWMy6^*Topb%%btwaJi`mSCljAmFdYnU`+zqKrLmPg@anZ%k zcLl1_dQD*+uzRZkj3lRGE#>LmO6A$eRg{flBNt~3u{Ag3oH^y}D^1*Sztk5O%)KaCK>11cL|+s_b*&!tl>)H zR8evjz4u|7i4RP#qffnZCd(v?yU%tf;56;-n#vsvEcXzzzFEyZdDu)&C!#!D3`)~6 z;QBD1_jY$BzuIyaVZi}1`r zLuqe*`LGyGJaBd;A;CIVIi66)c($OSbf{_u0cet;t1v3$SY9E7@OkW622j#}J1^J? z+K@-rk!-f6gDez7%;e?(DfNE@2SJ;d>AEBv{@B40hB;BoodWpn3jzzkg=5#{*jUZ* z8XGA6;Pgx$(2^vR4p3*HMH>=?cbw8GC|z%PrVVIG$CLD_K`=&~G+{!nECoh4Ma|BE ze`J!xSn6y+g43oLT0vcq3=+pP2xX3N^vz?eJ-`z7ByKy^Tk}kYIJ=Tj#)FU9yKlWU zz7A&A5fZJToIyGVEX31S-GPO42FU|7qcT@LL9^675&(#9h@?_y<&> z3RkJ%+)YQ875EEQ7N<#V)D%>upvF?s6*Z`_a!kblYOEbsv56X+ajMW3M<2uVmBKUk z<Cv+8X|FA18CD7!+K7E7}`qk`H=>F|q!gCM> ziQ3g_aQmjaxjR54RW%=?KJhtMvlBOttBjXq5X)GzGte@*P+<(jRh&s`0S6Hm*9oA{ z`nmcM>TN5xQ6*eoZDo!WUcJp*0n>UMSMU-HNDQl0%vY%Tj5MFVf2PhEOW6v`b z2%q`SnF<6Bs2Not^vcEeCP`o-G6!-bFhu%5r35CLG0-A`i7gx8N?@#N2c{%2CteM# zNnmW*1G17B`_BV=Br#_=1ICgVmv5FcB?$T<{#*@$t|ORxji6(e=h_f-y+3mu2)Y4e zdH_K;LQIb%=tsq;=MZ#L>1iQ?j@>rBiJ+VB$QQz4r?y)94)nBU5E)TQ)cbM*c^F@R z-}DjWn42>vN{U_Cz);*pHBPmP*8@D~q(XmSu~&@p6~y9Y6ujDX2WKzd4@oCpVay{fPJXqb2x|eb*u${9T$?FcJ?c_NuNyfYX6r-}V4%ufp}3o=(v+Akd31kIDk%=@1bVLMVHGIbIGR?ZQM-5W=gUmo>HlBtBfU zb8BwE16sBY_E$w{2;tk_0PpSKqy2S7@&kY&KBt*h@62lB;cjiZcA~yT0$eX?t~7T8be)y&DF~`Kw}^Ef(DfjL zH3+J$3&Z>(pc{z~4j`zhTn5Vv&`q}uZX&4dE|1N90NrBOpf-{^!yS+G=Z#@xC&U_Ycr)9^^5=R|+PG>gIq?=xGR-1=w7pDA9iqDk7D;VF7CQM-DEEft^~8YXHKY5Cnyzf%`q&ydoS_ zI+vLz1A_nd#u_$nA+O__7Q&G1r8ibUC4?M94$H6au4{N zbY2t3$KsF_bzxGce)`IB3XFjReRoCXn4B zxTkwVz5fD%4`_~}CMcDb`fbB)i`I@Ic}bwG?Yrg&!7UO)d56F>T64%za?4h`=o^&k zkX1)v0!Cg#sICr}-hX7MND`1vcLb1cKyU%=-z~fV$CLDusR)R4#{3cjcs>gxQz5ug z+ref6;DqJAbW;Mv{IYosB=9T^Cf|nOrrHNBd;y2t)clPs1)zIF*Y0ikx*wLI2T)lJ z^Bzdx)AN$`6`-9aUbgJo0yZqd!l;DyA&WpDr$ZT1kp@t$h_w<3?!7`>V~R3>+fRtj z!l<=DKfFSK+|heO@ms`7uaWB@IKJPNA|rq*>YN`&0@z;S+UqT22Zq(d0p^r5lcxft dvidi-cGjEgQ}G+Zn)p8=OAn1@ncY6J;D3!l^~eAK literal 0 HcmV?d00001 diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 79ed9e0c63fc8..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- 
/dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 79ed9e0c63fc8..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 79ed9e0c63fc8..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 79ed9e0c63fc8..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 deleted file mode 100644 index f0d165ff7cf82..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3cdb002e0f2f30ad9c5fd053d78b1a485511ab1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..ad4e055d4f19a --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.16.1.jar.sha1 @@ -0,0 +1 @@ +d952ad30d3f2d1220f39db175618414b56d14638 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 deleted file mode 100644 index 40379694f5ea5..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77e3a27823f795d928b897d8444744ddb044a5c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..4309dad93b2b6 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.16.1.jar.sha1 @@ -0,0 +1 @@ +36a418325c618e440e5ccb80b75c705d894f50bd \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 820d14b3df8e4..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -684daae9ea45087c670b4f6511edcfdb19c3a695 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..5f54d0ac554e0 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +e9df364a2695e66eb8d2803d6725424842760125 \ No newline at 
end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 deleted file mode 100644 index 79ed9e0c63fc8..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc30995f7428c0a405eba9b8c619b20d2b3b9905 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..cbc65687606fc --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.16.1.jar.sha1 @@ -0,0 +1 @@ +fd441d574a71e7d10a4f73de6609f881d8cdfeec \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 deleted file mode 100644 index da00d281934b1..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.16.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6b7f8ff7b30d518bbd65678e9c30cd881f19a7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 new file mode 100644 index 0000000000000..d231db4fd49fc --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.16.1.jar.sha1 @@ -0,0 +1 @@ +02a16efeb840c45af1e2f31753dfe76795278b73 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 8ebb007787828..9b8fa7eec37b9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -46,6 +46,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.Assertions; @@ -149,13 +150,45 @@ public enum MergeReason { "index.mapping.depth.limit", 20L, 1, + Long.MAX_VALUE, + limit -> { + // Make sure XContent constraints are not exceeded (otherwise content processing will fail) + if (limit > XContentContraints.DEFAULT_MAX_DEPTH) { + throw new IllegalArgumentException( + "The provided value " + + limit + + " of the index setting 'index.mapping.depth.limit' exceeds per-JVM configured limit of " + + XContentContraints.DEFAULT_MAX_DEPTH + + ". Please change the setting value or increase per-JVM limit " + + "using '" + + XContentContraints.DEFAULT_MAX_DEPTH_PROPERTY + + "' system property." + ); + } + }, Property.Dynamic, Property.IndexScope ); public static final Setting INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING = Setting.longSetting( "index.mapping.field_name_length.limit", - Long.MAX_VALUE, + 50000, 1L, + Long.MAX_VALUE, + limit -> { + // Make sure XContent constraints are not exceeded (otherwise content processing will fail) + if (limit > XContentContraints.DEFAULT_MAX_NAME_LEN) { + throw new IllegalArgumentException( + "The provided value " + + limit + + " of the index setting 'index.mapping.field_name_length.limit' exceeds per-JVM configured limit of " + + XContentContraints.DEFAULT_MAX_NAME_LEN + + ". 
Please change the setting value or increase per-JVM limit " + + "using '" + + XContentContraints.DEFAULT_MAX_NAME_LEN_PROPERTY + + "' system property." + ); + } + }, Property.Dynamic, Property.IndexScope ); diff --git a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java index f0f34dff0a38f..bb3f2be8ea748 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MapperServiceTests.java @@ -36,6 +36,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentContraints; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; @@ -65,6 +66,7 @@ import java.util.Map; import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -158,6 +160,26 @@ public void testMappingDepthExceedsLimit() throws Throwable { assertThat(e.getMessage(), containsString("Limit of mapping depth [1] has been exceeded")); } + public void testMappingDepthExceedsXContentLimit() throws Throwable { + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> createIndex( + "test1", + Settings.builder() + .put(MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), XContentContraints.DEFAULT_MAX_DEPTH + 1) + .build() + ) + ); + + assertThat( + ex.getMessage(), + is( + "The provided value 1001 of the index setting 'index.mapping.depth.limit' exceeds per-JVM configured limit of 1000. " + + "Please change the setting value or increase per-JVM limit using 'opensearch.xcontent.depth.max' system property." + ) + ); + } + public void testUnmappedFieldType() { MapperService mapperService = createIndex("index").mapperService(); assertThat(mapperService.unmappedFieldType("keyword"), instanceOf(KeywordFieldType.class)); @@ -300,6 +322,26 @@ public void testTotalFieldsLimitWithFieldAlias() throws Throwable { assertEquals("Limit of total fields [" + numberOfNonAliasFields + "] has been exceeded", e.getMessage()); } + public void testFieldNameLengthExceedsXContentLimit() throws Throwable { + final IllegalArgumentException ex = assertThrows( + IllegalArgumentException.class, + () -> createIndex( + "test1", + Settings.builder() + .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), XContentContraints.DEFAULT_MAX_NAME_LEN + 1) + .build() + ) + ); + + assertThat( + ex.getMessage(), + is( + "The provided value 50001 of the index setting 'index.mapping.field_name_length.limit' exceeds per-JVM configured limit of 50000. " + + "Please change the setting value or increase per-JVM limit using 'opensearch.xcontent.name.length.max' system property." 
+ ) + ); + } + public void testFieldNameLengthLimit() throws Throwable { int maxFieldNameLength = randomIntBetween(25, 30); String testString = new String(new char[maxFieldNameLength + 1]).replace("\0", "a"); From c8ae7f05a7823c480c5e9b2dccae5ca26ad0908b Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 12 Jan 2024 16:50:30 -0600 Subject: [PATCH 69/81] Fix log appender in InsecureSettingTests (#11866) The appender was being added to the logger before the appender was started. This can lead to logger errors with concurrently running tests because the logger is static state shared across the JVM. See #10799. I've also added a removeAppender call that was missing from LoggersTests, but that is mostly hygiene and would not lead to failures. Signed-off-by: Andrew Ross --- .../common/logging/LoggersTests.java | 63 ++++++++++--------- .../common/settings/InsecureSettingTests.java | 7 ++- 2 files changed, 38 insertions(+), 32 deletions(-) diff --git a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java index 17c4f9d0fe13d..d9db57aef15b6 100644 --- a/server/src/test/java/org/opensearch/common/logging/LoggersTests.java +++ b/server/src/test/java/org/opensearch/common/logging/LoggersTests.java @@ -53,40 +53,45 @@ public void testParameterizedMessageLambda() throws Exception { appender.start(); final Logger testLogger = LogManager.getLogger(LoggersTests.class); Loggers.addAppender(testLogger, appender); - Loggers.setLevel(testLogger, Level.TRACE); + try { + Loggers.setLevel(testLogger, Level.TRACE); - Throwable ex = randomException(); - testLogger.error(() -> new ParameterizedMessage("an error message"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); + Throwable ex = randomException(); + testLogger.error(() -> new ParameterizedMessage("an error message"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.ERROR)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an error message")); - ex = randomException(); - testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); + ex = randomException(); + testLogger.warn(() -> new ParameterizedMessage("a warn message: [{}]", "long gc"), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.WARN)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a warn message: [long gc]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining("long gc")); - testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); - assertThat(appender.lastEvent.getThrown(), nullValue()); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); - 
assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); + testLogger.info(() -> new ParameterizedMessage("an info message a=[{}], b=[{}], c=[{}]", 1, 2, 3)); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.INFO)); + assertThat(appender.lastEvent.getThrown(), nullValue()); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("an info message a=[1], b=[2], c=[3]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(1, 2, 3)); - ex = randomException(); - testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); + ex = randomException(); + testLogger.debug(() -> new ParameterizedMessage("a debug message options = {}", Arrays.asList("yes", "no")), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.DEBUG)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a debug message options = [yes, no]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(Arrays.asList("yes", "no"))); - ex = randomException(); - testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); - assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); - assertThat(appender.lastEvent.getThrown(), equalTo(ex)); - assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); - assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + ex = randomException(); + testLogger.trace(() -> new ParameterizedMessage("a trace message; element = [{}]", new Object[] { null }), ex); + assertThat(appender.lastEvent.getLevel(), equalTo(Level.TRACE)); + assertThat(appender.lastEvent.getThrown(), equalTo(ex)); + assertThat(appender.lastParameterizedMessage().getFormattedMessage(), equalTo("a trace message; element = [null]")); + assertThat(appender.lastParameterizedMessage().getParameters(), arrayContaining(new Object[] { null })); + } finally { + Loggers.removeAppender(testLogger, appender); + appender.stop(); + } } private Throwable randomException() { diff --git a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java index b256ab956f963..9358013826a1c 100644 --- a/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/InsecureSettingTests.java @@ -25,7 +25,7 @@ public class InsecureSettingTests extends OpenSearchTestCase { private List rootLogMsgs = new ArrayList<>(); private AbstractAppender rootAppender; - protected void assertSettingWarning() { + private void assertSettingWarning() { assertWarnings( "[setting.name] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." 
); @@ -50,13 +50,14 @@ public void append(LogEvent event) { InsecureSettingTests.this.rootLogMsgs.add(message); } }; - Loggers.addAppender(LogManager.getRootLogger(), rootAppender); rootAppender.start(); + Loggers.addAppender(LogManager.getLogger(SecureSetting.class), rootAppender); } @After public void removeInsecureSettingsAppender() { - Loggers.removeAppender(LogManager.getRootLogger(), rootAppender); + Loggers.removeAppender(LogManager.getLogger(SecureSetting.class), rootAppender); + rootAppender.stop(); } public void testShouldRaiseExceptionByDefault() { From 988dea88a0aaae192db602f35f5cd6d714d07fce Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Sat, 13 Jan 2024 22:16:44 +0800 Subject: [PATCH 70/81] Update supported version for must_exist parameter in update aliases API (#11872) * Update supported version for must_exist parameter in update aliases API Signed-off-by: Gao Binlong * Modify change log Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../40_remove_with_must_exist.yml | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10338f6646053..a4d42116fa9af 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,6 +83,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Don't over-allocate in HeapBufferedAsyncEntityConsumer in order to consume the response ([#9993](https://github.com/opensearch-project/OpenSearch/pull/9993)) - Update supported version for max_shard_size parameter in Shrink API ([#11439](https://github.com/opensearch-project/OpenSearch/pull/11439)) - Fix typo in API annotation check message ([11836](https://github.com/opensearch-project/OpenSearch/pull/11836)) +- Update supported version for must_exist parameter in update aliases API ([#11872](https://github.com/opensearch-project/OpenSearch/pull/11872)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml index dbf6a4fad3295..b9457f0290897 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.update_aliases/40_remove_with_must_exist.yml @@ -1,8 +1,8 @@ --- "Throw aliases missing exception when removing non-existing alias with setting must_exist to true": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -47,8 +47,8 @@ --- "Throw aliases missing exception when all of the specified aliases are non-existing": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -81,8 +81,8 @@ --- "Remove successfully when some specified aliases are non-existing": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: @@ -116,8 +116,8 @@ --- "Remove silently when all of the specified aliases are non-existing and must_exist is false": - skip: - version: " - 2.99.99" - reason: "introduced in 3.0" + version: " - 2.11.99" + reason: "introduced in 2.12.0" - do: indices.create: From ac4aca210f799237922b15c4c55faeccd8cb9f7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 
Jan 2024 09:16:42 -0500 Subject: [PATCH 71/81] Bump lycheeverse/lychee-action from 1.9.0 to 1.9.1 (#11887) * Bump lycheeverse/lychee-action from 1.9.0 to 1.9.1 Bumps [lycheeverse/lychee-action](https://github.com/lycheeverse/lychee-action) from 1.9.0 to 1.9.1. - [Release notes](https://github.com/lycheeverse/lychee-action/releases) - [Commits](https://github.com/lycheeverse/lychee-action/compare/v1.9.0...v1.9.1) --- updated-dependencies: - dependency-name: lycheeverse/lychee-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- .github/workflows/links.yml | 2 +- CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index 2714d45bd108f..61962c91b4903 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -13,7 +13,7 @@ jobs: - uses: actions/checkout@v4 - name: lychee Link Checker id: lychee - uses: lycheeverse/lychee-action@v1.9.0 + uses: lycheeverse/lychee-action@v1.9.1 with: args: --accept=200,403,429 --exclude-mail **/*.html **/*.md **/*.txt **/*.json --exclude-file .lychee.excludes fail: true diff --git a/CHANGELOG.md b/CHANGELOG.md index a4d42116fa9af..8b6840b62ca5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -165,7 +165,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-lang3` from 3.13.0 to 3.14.0 ([#11691](https://github.com/opensearch-project/OpenSearch/pull/11691)) - Bump `com.maxmind.db:maxmind-db` from 3.0.0 to 3.1.0 ([#11693](https://github.com/opensearch-project/OpenSearch/pull/11693)) - Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) -- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.0 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795)) +- Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) - Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)) ### Changed From a90dd86646d64f76f9e08b11455c522b202d10ee Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jan 2024 09:23:18 -0500 Subject: [PATCH 72/81] Bump com.networknt:json-schema-validator from 1.0.86 to 1.1.0 (#11886) * Bump com.networknt:json-schema-validator from 1.0.86 to 1.1.0 Bumps [com.networknt:json-schema-validator](https://github.com/networknt/json-schema-validator) from 1.0.86 to 1.1.0. - [Release notes](https://github.com/networknt/json-schema-validator/releases) - [Changelog](https://github.com/networknt/json-schema-validator/blob/master/CHANGELOG.md) - [Commits](https://github.com/networknt/json-schema-validator/compare/1.0.86...1.1.0) --- updated-dependencies: - dependency-name: com.networknt:json-schema-validator dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + buildSrc/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8b6840b62ca5c..6d2558ef5743f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -167,6 +167,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `net.java.dev.jna:jna` from 5.13.0 to 5.14.0 ([#11798](https://github.com/opensearch-project/OpenSearch/pull/11798)) - Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) - Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)) +- Bump `com.networknt:json-schema-validator` from 1.0.86 to 1.1.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3c846b48549fb..1cb21acd14af7 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -118,7 +118,7 @@ dependencies { api 'com.avast.gradle:gradle-docker-compose-plugin:0.17.6' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" api 'org.apache.maven:maven-model:3.9.6' - api 'com.networknt:json-schema-validator:1.0.86' + api 'com.networknt:json-schema-validator:1.1.0' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" From c132db950548ee99127bc92be8d0f388cc0f4a51 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 15 Jan 2024 13:01:29 -0500 Subject: [PATCH 73/81] Bump OpenTelemetry from 1.32.0 to 1.34.1 (#11891) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 | 1 + .../opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 | 1 - .../opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 | 1 - 
.../licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 | 1 + 28 files changed, 15 insertions(+), 14 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d2558ef5743f..26d324839ae54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,6 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) - Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) - Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality 
([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) +- Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) ### Dependencies - Bumps jetty version to 9.4.52.v20230823 to fix GMS-2023-1857 ([#9822](https://github.com/opensearch-project/OpenSearch/pull/9822)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3813750507f18..dd7f2e1eaabf0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -70,5 +70,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.32.0 +opentelemetry = 1.34.1 opentelemetrysemconv = 1.23.1-alpha diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 deleted file mode 100644 index 2c038aad4b934..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a5c081d8f877225732efe13908f350029c811709 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..19f734ca17b79 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.34.1.jar.sha1 @@ -0,0 +1 @@ +b4aea155f6d6b1032eba85378564431cfd86f562 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 deleted file mode 100644 index 3243f524432eb..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c5f8bb68084ea5709a27e935907b1bb49d0bd049 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..4c06d28cba199 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.34.1.jar.sha1 @@ -0,0 +1 @@ +3fcc87f3d810ce49d865ee54b40831559c5e129b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 deleted file mode 100644 index 1d7da47286ae0..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3643061da474061ffa7f2036a58a7a0d40212276 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..91a5c0f715d2b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.34.1.jar.sha1 @@ -0,0 +1 @@ +19c9a3f52851a1333b648ed83c82d16eb4c64afd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 deleted file mode 100644 index 3fab0e47adcbe..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab56c7223112fac13a66e3f667c5fc666f4a3707 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 
b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..6c05600ae3b08 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.34.1.jar.sha1 @@ -0,0 +1 @@ +b3e74d5b8cf5e60d9965042fa284085bbe081ce3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 deleted file mode 100644 index f93cf7a63bfad..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5752d171cd08ac84f9273258a315bc5f97e1187e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..f54e6f6893050 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.34.1.jar.sha1 @@ -0,0 +1 @@ +af68f90f0410b7b3a1900d3e0a15ad51b10ffd5b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 deleted file mode 100644 index 2fc33b62aee54..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6b41cd66a385d513b58b6617f20b701435b64abd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..49d40b36ba85b --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.34.1.jar.sha1 @@ -0,0 +1 @@ +4acab18052267e280d1f9de22c591a5c88bed3a6 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 deleted file mode 100644 index 99f758b047aa2..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9346006cead763247a786b5cabf3e1ae3c88eadb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..a01de2aa84c43 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.34.1.jar.sha1 @@ -0,0 +1 @@ +9f07e1764389e076a36fb7d9e5769e29f3dab950 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 deleted file mode 100644 index 705a342a684c4..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.32.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fab56e187e3fb3c70c18223184d53a76500114ab \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 new file mode 100644 index 0000000000000..a5fc8c2059104 --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-extension-incubator-1.34.1-alpha.jar.sha1 @@ -0,0 +1 @@ +9201e6a43a0a89515626f7516c7d1b2c349f76df \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 deleted file mode 100644 index 31818695cc774..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -504de8cc7dc68e84c8c7c2757522d934e9c50d35 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..cd746f0756e46 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.34.1.jar.sha1 @@ -0,0 +1 @@ +ab49eb621d6d01f0ad2f016989d0352ef18ea9a2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 deleted file mode 100644 index 3cf3080a98bd9..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -454c7a6afab864de9f0c166246f28f16aaa824c1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..740737dc13efc --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.34.1.jar.sha1 @@ -0,0 +1 @@ +01fcd8bad38d7b8987f6fc93bd7e933240eb727e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 deleted file mode 100644 index 41b0dca07556e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b054760243906af0a327a8f5bd99adc2826ccd88 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..e6ff3dbafda22 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.34.1.jar.sha1 @@ -0,0 +1 @@ +abad9abc80dfe6118a60413afa161696bbf8dd43 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 deleted file mode 100644 index 2f71fd5cc780a..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bff24f085193e105d4e23e3db27bf81ccb3d830e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..36ec960c4f7be --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.34.1.jar.sha1 @@ -0,0 +1 @@ +d88407ae475e5f4e859a81e4f61e362e939f7bc2 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 deleted file mode 100644 index f0060b8a0f78f..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.32.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d80ad3210fa890a856a1d04379d134ab44a09501 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..293b82f206c99 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.34.1.jar.sha1 @@ -0,0 +1 @@ +121a75c2ba9ed8b80f5ff131c2411a5d460f38d0 \ No newline at end of file From 4c283a7c79b20e73ccf3378124893451197b85a3 Mon Sep 17 00:00:00 2001 From: Fahad Shami <48306098+fahadshamiinsta@users.noreply.github.com> Date: Wed, 17 Jan 2024 09:32:55 +1100 Subject: [PATCH 74/81] Support for Google Application Default Credentials (#8394) * fixed conflicts Signed-off-by: fahadshamiinsta * applying spotless Java check Signed-off-by: fahadshamiinsta * added a comment to test helper method Signed-off-by: fahadshamiinsta * added a comment to GoogleApplicationDefaultCredentials class with document reference Signed-off-by: fahadshamiinsta * to rerun gradle checks Signed-off-by: fahadshamiinsta * increasing coverage by adding another test Signed-off-by: fahadshamiinsta * test name change Signed-off-by: fahadshamiinsta * rerun ci Signed-off-by: fahadshamiinsta * rerun ci Signed-off-by: fahadshamiinsta * force push to rerun ci Signed-off-by: fahadshamiinsta * pushing to trigger ci checks Signed-off-by: fahadshamiinsta --------- Signed-off-by: fahadshamiinsta --- CHANGELOG.md | 1 + .../GoogleApplicationDefaultCredentials.java | 33 +++++ .../gcs/GoogleCloudStorageService.java | 20 ++- .../gcs/GoogleCloudStorageServiceTests.java | 132 ++++++++++++++---- 4 files changed, 157 insertions(+), 29 deletions(-) create mode 100644 plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 26d324839ae54..14055f0d8462f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) - Switched to more reliable OpenSearch Lucene snapshot location([#11728](https://github.com/opensearch-project/OpenSearch/pull/11728)) +- Added support for Google Application Default Credentials in repository-gcs ([#8394](https://github.com/opensearch-project/OpenSearch/pull/8394)) ### Deprecated diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java new file mode 100644 index 0000000000000..5002ab9a2e704 --- /dev/null +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleApplicationDefaultCredentials.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.gcs; + +import com.google.auth.oauth2.GoogleCredentials; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.IOException; + +/** + * This class facilitates to fetch Application Default Credentials + * see How Application Default Credentials works + */ +public class GoogleApplicationDefaultCredentials { + private static final Logger logger = LogManager.getLogger(GoogleApplicationDefaultCredentials.class); + + public GoogleCredentials get() { + GoogleCredentials credentials = null; + try { + credentials = SocketAccess.doPrivilegedIOException(GoogleCredentials::getApplicationDefault); + } catch (IOException e) { + logger.error("Failed to retrieve \"Application Default Credentials\"", e); + } + return credentials; + } +} diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java index c9ebb3acaf3e5..83a4146c99b99 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageService.java @@ -36,6 +36,7 @@ import com.google.api.client.http.HttpRequestInitializer; import com.google.api.client.http.HttpTransport; import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.oauth2.GoogleCredentials; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.ServiceOptions; import com.google.cloud.http.HttpTransportOptions; @@ -70,6 +71,16 @@ public class GoogleCloudStorageService { */ private volatile Map clientCache = emptyMap(); + final private GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials; + + public GoogleCloudStorageService() { + this.googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + } + + public GoogleCloudStorageService(GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials) { + this.googleApplicationDefaultCredentials = googleApplicationDefaultCredentials; + } + /** * Refreshes the client settings and clears the client cache. Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed @@ -213,10 +224,11 @@ StorageOptions createStorageOptions( storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } if (clientSettings.getCredential() == null) { - logger.warn( - "\"Application Default Credentials\" are not supported out of the box." - + " Additional file system permissions have to be granted to the plugin." 
- ); + logger.info("\"Application Default Credentials\" will be in use"); + final GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + if (credentials != null) { + storageOptionsBuilder.setCredentials(credentials); + } } else { ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); // override token server URI diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java index a531555debefb..58e412684ed5a 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -33,8 +33,10 @@ package org.opensearch.repositories.gcs; import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.http.HttpTransportOptions; import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; @@ -42,30 +44,38 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.test.OpenSearchTestCase; +import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; +import java.io.IOException; +import java.net.Proxy; +import java.net.URI; +import java.net.URISyntaxException; import java.security.KeyPair; import java.security.KeyPairGenerator; import java.util.Base64; import java.util.Locale; import java.util.UUID; +import org.mockito.Mockito; + import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class GoogleCloudStorageServiceTests extends OpenSearchTestCase { + final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + final String endpoint = randomFrom("http://", "https://") + + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") + + ":" + + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + public void testClientInitializer() throws Exception { final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); - final String applicationName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); - final String endpoint = randomFrom("http://", "https://") - + randomFrom("www.opensearch.org", "www.googleapis.com", "localhost/api", "google.com/oauth") - + ":" - + randomIntBetween(1, 65535); - final String projectIdName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); final Settings settings = Settings.builder() .put( GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), @@ 
-82,31 +92,35 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - final GoogleCloudStorageService service = new GoogleCloudStorageService(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + final GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> service.client("another_client", "repo", statsCollector) ); - assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + MatcherAssert.assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) } ); final Storage storage = service.client(clientName, "repo", statsCollector); - assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); - assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); - assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); - assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); - assertThat( + MatcherAssert.assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + MatcherAssert.assertThat(storage.getOptions().getHost(), Matchers.is(endpoint)); + MatcherAssert.assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); + MatcherAssert.assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); + MatcherAssert.assertThat( ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), Matchers.is((int) connectTimeValue.millis()) ); - assertThat( + MatcherAssert.assertThat( ((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), Matchers.is((int) readTimeValue.millis()) ); - assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); + MatcherAssert.assertThat(storage.getOptions().getCredentials(), Matchers.instanceOf(Credentials.class)); } public void testReinitClientSettings() throws Exception { @@ -122,33 +136,33 @@ public void testReinitClientSettings() throws Exception { final GoogleCloudStorageService storageService = plugin.storageService; GoogleCloudStorageOperationsStats statsCollector = new GoogleCloudStorageOperationsStats("bucket"); final Storage client11 = storageService.client("gcs1", "repo1", statsCollector); - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); final Storage client12 = storageService.client("gcs2", "repo2", statsCollector); - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + 
MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // client 3 is missing final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs3", "repo3", statsCollector) ); - assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); + MatcherAssert.assertThat(e1.getMessage(), containsString("Unknown client name [gcs3].")); // update client settings plugin.reload(settings2); // old client 1 not changed - assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); + MatcherAssert.assertThat(client11.getOptions().getProjectId(), equalTo("project_gcs11")); // new client 1 is changed final Storage client21 = storageService.client("gcs1", "repo1", statsCollector); - assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); + MatcherAssert.assertThat(client21.getOptions().getProjectId(), equalTo("project_gcs21")); // old client 2 not changed - assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); + MatcherAssert.assertThat(client12.getOptions().getProjectId(), equalTo("project_gcs12")); // new client2 is gone final IllegalArgumentException e2 = expectThrows( IllegalArgumentException.class, () -> storageService.client("gcs2", "repo2", statsCollector) ); - assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); + MatcherAssert.assertThat(e2.getMessage(), containsString("Unknown client name [gcs2].")); // client 3 emerged final Storage client23 = storageService.client("gcs3", "repo3", statsCollector); - assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); + MatcherAssert.assertThat(client23.getOptions().getProjectId(), equalTo("project_gcs23")); } } @@ -193,4 +207,72 @@ public void testToTimeout() { assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); assertEquals(0, GoogleCloudStorageService.toTimeout(TimeValue.MINUS_ONE).intValue()); } + + /** + * The following method test the Google Application Default Credential instead of + * using service account file. + * Considered use of JUnit Mocking due to static method GoogleCredentials.getApplicationDefault + * and avoiding environment variables to set which later use GCE. 
+ * @throws Exception + */ + public void testApplicationDefaultCredential() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + GoogleCredentials mockGoogleCredentials = Mockito.mock(GoogleCredentials.class); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleApplicationDefaultCredentials mockDefaultCredentials = Mockito.mock(GoogleApplicationDefaultCredentials.class); + Mockito.when(mockDefaultCredentials.get()).thenReturn(mockGoogleCredentials); + + GoogleCloudStorageService service = new GoogleCloudStorageService(mockDefaultCredentials); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + assertNotNull(storageOptions); + assertEquals(storageOptions.getCredentials().toString(), mockGoogleCredentials.toString()); + } + + /** + * The application default credential throws exception when there are + * no Environment Variables provided or Google Compute Engine is not running + * @throws Exception + */ + public void testApplicationDefaultCredentialsWhenNoSettingProvided() throws Exception { + GoogleCloudStorageClientSettings settings = getGCSClientSettingsWithoutCredentials(); + HttpTransportOptions mockHttpTransportOptions = Mockito.mock(HttpTransportOptions.class); + GoogleCloudStorageService service = new GoogleCloudStorageService(); + StorageOptions storageOptions = service.createStorageOptions(settings, mockHttpTransportOptions); + + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + assertNotNull(storageOptions); + assertNull(storageOptions.getCredentials()); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * The application default credential throws IOException when it is + * used without GoogleCloudStorageService + */ + public void testDefaultCredentialsThrowsExceptionWithoutGCStorageService() { + GoogleApplicationDefaultCredentials googleApplicationDefaultCredentials = new GoogleApplicationDefaultCredentials(); + GoogleCredentials credentials = googleApplicationDefaultCredentials.get(); + assertNull(credentials); + Exception exception = assertThrows(IOException.class, GoogleCredentials::getApplicationDefault); + MatcherAssert.assertThat(exception.getMessage(), containsString("The Application Default Credentials are not available")); + } + + /** + * This is a helper method to provide GCS Client settings without credentials + * @return GoogleCloudStorageClientSettings + * @throws URISyntaxException + */ + private GoogleCloudStorageClientSettings getGCSClientSettingsWithoutCredentials() throws URISyntaxException { + return new GoogleCloudStorageClientSettings( + null, + endpoint, + projectIdName, + connectTimeValue, + readTimeValue, + applicationName, + new URI(""), + new ProxySettings(Proxy.Type.DIRECT, null, 0, null, null) + ); + } + } From 1d8bbd5d643e483a0089243d5c7d0f7e28ac8d16 Mon Sep 17 00:00:00 2001 From: Peter Nied Date: Tue, 16 Jan 2024 16:55:49 -0600 Subject: [PATCH 75/81] Add TRIAGING.md for triage agenda and processes (#11814) Signed-off-by: Peter Nied Signed-off-by: Peter Nied --- TRIAGING.md | 83 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 TRIAGING.md diff --git a/TRIAGING.md b/TRIAGING.md new file mode 100644 index 0000000000000..47cb44a4f5ba2 --- /dev/null +++ b/TRIAGING.md @@ -0,0 +1,83 @@ + + +The maintainers of the 
OpenSearch Repo seek to promote an inclusive and engaged community of contributors. In order to facilitate this, weekly triage meetings are open to all and attendance is encouraged for anyone who hopes to contribute, discuss an issue, or learn more about the project. To learn more about contributing to the OpenSearch Repo, visit the [Contributing](./CONTRIBUTING.md) documentation. + +### Do I need to attend for my issue to be addressed/triaged? + +Attendance is not required for your issue to be triaged or addressed. If not accepted, the issue will be updated with a comment for next steps. All new issues are triaged weekly. + +You can track if your issue was triaged by watching your GitHub notifications for updates. + +### What happens if my issue does not get covered this time? + +Each meeting we seek to address all new issues. However, should we run out of time before your issue is discussed, you are always welcome to attend the next meeting or to follow up on the issue post itself. + +### How do I join the Triage meeting? + +Meetings are hosted regularly at 10:00a - 10:55a Central Time every Wednesday and can be joined via [Chime](https://aws.amazon.com/chime/), with this [meeting link](https://chime.aws/1988437365). + +After joining the Chime meeting, you can enable your video / voice to join the discussion. If you do not have a webcam or microphone available, you can still join in via the text chat. + +If you have an issue you'd like to bring forth, please prepare a link to the issue so it can be presented and viewed by everyone in the meeting. + +### Is there an agenda for each week? + +Yes, each 55-minute meeting follows this structure: +1. **Initial Gathering:** Feel free to turn on your video and engage in informal conversation. Shortly, a volunteer triage [facilitator](#what-is-the-role-of-the-facilitator) will begin the meeting and share their screen. +2. **Record Attendees:** The facilitator will request attendees to share their GitHub profile links. These links will be collected and assembled into a [tag](#how-do-triage-facilitator-tag-comments-during-the-triage-meeting) to annotate comments during the meeting. +3. **Announcements:** Any announcements will be made at the beginning of the meeting. +4. **Review of New Issues:** We start by reviewing all untriaged [issues](https://github.com/search?q=label%3Auntriaged+is%3Aopen++repo%3Aopensearch-project%2FOpenSearch+&type=issues&ref=advsearch&s=created&o=desc) for the OpenSearch repo. +5. **Attendee Requests:** An opportunity for any meeting member to request consideration of an issue or pull request. +6. **Open Discussion:** Attendees can bring up any topics not already covered by filed issues or pull requests. + +### What is the role of the facilitator? + +The facilitator is crucial in driving the meeting, ensuring a smooth flow of issues into OpenSearch for future contributions. They maintain the meeting's agenda, solicit input from attendees, and record outcomes using the triage tag as items are discussed. + +### Do I need to have already contributed to the project to attend a triage meeting? + +No prior contributions are required. All interested individuals are welcome and encouraged to attend. Triage meetings offer a fantastic opportunity for new contributors to understand the project and explore various contribution avenues. + +### What if I have an issue that is almost a duplicate, should I open a new one to be triaged?
+ +You can always open an [issue](https://github.com/opensearch-project/OpenSearch/issues/new/choose) including one that you think may be a duplicate. If you believe your issue is similar but distinct from an existing one, you are encouraged to file it and explain the differences during the triage meeting. + +### What if I have follow-up questions on an issue? + +If you have an existing issue you would like to discuss, you can always comment on the issue itself. Alternatively, you are welcome to come to the triage meeting to discuss. + +### Is this meeting a good place to get help setting up features on my OpenSearch instance? + +While we are always happy to help the community, the best resource for implementation questions is [the OpenSearch forum](https://forum.opensearch.org/). + +There you can find answers to many common questions as well as speak with implementation experts. + +### What are the issue labels associated with triaging? + +Yes, there are several labels that are used to identify the 'state' of issues filed in OpenSearch . +| Label | When Applied | Meaning | +|---------------|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------| +| `Untriaged` | When issues are created or re-opened. | Issues labeled as 'Untriaged' require the attention of the repository maintainers and may need to be prioritized for quicker resolution. It's crucial to keep the count of 'Untriaged' labels low to ensure all potential security issues are addressed in a timely manner. See [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) for more details on handling these issues. | +| `Help Wanted` | Anytime. | Issues marked as 'Help Wanted' signal that they are actionable and not the current focus of the project maintainers. Community contributions are especially encouraged for these issues. | +| `Good First Issue` | Anytime. | Issues labeled as 'Good First Issue' are small in scope and can be resolved with a single pull request. These are recommended starting points for newcomers looking to make their first contributions. | + +### What are the typical outcomes of a triaged issue? + +| Outcome | Label | Description | Canned Response | +|--------------|------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Accepted | `-untriaged` | The issue has the details needed to be directed towards area owners. | "Thanks for filing this issue, please feel free to submit a pull request." | +| Rejected | N/A | The issue will be closed with a reason for why it was rejected. Reasons might include lack of details, or being outside the scope of the project. | "Thanks for creating this issue; however, it isn't being accepted due to {REASON}. Please feel free to re-open after addressing the reason." | +| Area Triage | `+{AREALABEL}` | OpenSearch has many different areas. If it's unclear whether an issue should be accepted, it will be labeled with the area and an owner will be @mentioned for follow-up. 
| "Thanks for creating this issue; the triage meeting was unsure if this issue should be accepted, @{PERSON} or someone from the area please review and then accept or reject this issue?" | +| Transfer | N/A | If the issue applies to another repository within the OpenSearch Project, it will be transferred accordingly. | "@opensearch-project/triage, can you please transfer this issue to project {REPOSITORY}." Or, if someone at the meeting has permissions, they can start the transfer. | + +### Is this where I should bring up potential security vulnerabilities? + +Due to the sensitive nature of security vulnerabilities, please report all potential vulnerabilities directly by following the steps outlined on the [SECURITY.md](https://github.com/opensearch-project/OpenSearch/blob/main/SECURITY.md) document. + +### How do triage facilitator tag comments during the triage meeting? + +During the triage meeting, facilitators should use the tag _[Triage - attendees [1](#Profile_link) [2](#Profile_link)]_ to indicate a collective decision. This ensures contributors know the decision came from the meeting rather than an individual and identifies participants for any follow-up queries. + +This tag should not be used outside triage meetings. From 6d2d4ddc8b096e95b94b65142ed98437d96d8d29 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 17 Jan 2024 07:23:05 +0800 Subject: [PATCH 76/81] Add copy ingest processor (#11870) --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/CopyProcessor.java | 161 ++++++++ .../common/IngestCommonModulePlugin.java | 1 + .../common/CopyProcessorFactoryTests.java | 101 +++++ .../ingest/common/CopyProcessorTests.java | 125 ++++++ .../rest-api-spec/test/ingest/10_basic.yml | 17 + .../test/ingest/300_copy_processor.yml | 374 ++++++++++++++++++ .../org/opensearch/ingest/IngestDocument.java | 2 +- 8 files changed, 781 insertions(+), 1 deletion(-) create mode 100644 modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java create mode 100644 modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java create mode 100644 modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java create mode 100644 modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index 14055f0d8462f..d1e970d097bfb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -126,6 +126,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add additional handling in SearchTemplateRequest when simulate is set to true ([#11591](https://github.com/opensearch-project/OpenSearch/pull/11591)) - Introduce cluster level setting `cluster.index.restrict.replication.type` to prevent replication type setting override during index creations([#11583](https://github.com/opensearch-project/OpenSearch/pull/11583)) - Add match_only_text field that is optimized for storage by trading off positional queries performance ([#6836](https://github.com/opensearch-project/OpenSearch/pull/11039)) +- Add copy ingest processor ([#11870](https://github.com/opensearch-project/OpenSearch/pull/11870)) - Introduce new feature flag "WRITEABLE_REMOTE_INDEX" to gate the writeable remote index functionality ([#11717](https://github.com/opensearch-project/OpenSearch/pull/11170)) - Bump OpenTelemetry from 1.32.0 to 1.34.1 ([#11891](https://github.com/opensearch-project/OpenSearch/pull/11891)) diff --git 
a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java new file mode 100644 index 0000000000000..dec69df275130 --- /dev/null +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/CopyProcessor.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.core.common.Strings; +import org.opensearch.ingest.AbstractProcessor; +import org.opensearch.ingest.ConfigurationUtils; +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.script.ScriptService; +import org.opensearch.script.TemplateScript; + +import java.util.Map; + +public final class CopyProcessor extends AbstractProcessor { + public static final String TYPE = "copy"; + + private final TemplateScript.Factory sourceField; + private final TemplateScript.Factory targetField; + + private final boolean ignoreMissing; + + private final boolean removeSource; + + private final boolean overrideTarget; + + CopyProcessor(String tag, String description, TemplateScript.Factory sourceField, TemplateScript.Factory targetField) { + this(tag, description, sourceField, targetField, false, false, false); + } + + CopyProcessor( + String tag, + String description, + TemplateScript.Factory sourceField, + TemplateScript.Factory targetField, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + super(tag, description); + this.sourceField = sourceField; + this.targetField = targetField; + this.ignoreMissing = ignoreMissing; + this.removeSource = removeSource; + this.overrideTarget = overrideTarget; + } + + public TemplateScript.Factory getSourceField() { + return sourceField; + } + + public TemplateScript.Factory getTargetField() { + return targetField; + } + + public boolean isIgnoreMissing() { + return ignoreMissing; + } + + public boolean isRemoveSource() { + return removeSource; + } + + public boolean isOverrideTarget() { + return overrideTarget; + } + + @Override + public IngestDocument execute(IngestDocument document) { + String source = document.renderTemplate(sourceField); + final boolean sourceFieldPathIsNullOrEmpty = Strings.isNullOrEmpty(source); + if (sourceFieldPathIsNullOrEmpty || document.hasField(source, true) == false) { + if (ignoreMissing) { + return document; + } else if (sourceFieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("source field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("source field [" + source + "] doesn't exist"); + } + } + + String target = document.renderTemplate(targetField); + if (Strings.isNullOrEmpty(target)) { + throw new IllegalArgumentException("target field path cannot be null nor empty"); + } + if (source.equals(target)) { + throw new IllegalArgumentException("source field path and target field path cannot be same"); + } + + if (overrideTarget || document.hasField(target, true) == false || document.getFieldValue(target, Object.class) == null) { + Object sourceValue = document.getFieldValue(source, Object.class); + document.setFieldValue(target, IngestDocument.deepCopy(sourceValue)); + } else { + throw new IllegalArgumentException("target field [" + target + "] already exists"); + } + + if (removeSource) { + 
document.removeField(source); + } + + return document; + } + + @Override + public String getType() { + return TYPE; + } + + public static final class Factory implements Processor.Factory { + + private final ScriptService scriptService; + + public Factory(ScriptService scriptService) { + this.scriptService = scriptService; + } + + @Override + public CopyProcessor create( + Map registry, + String processorTag, + String description, + Map config + ) throws Exception { + String sourceField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "source_field"); + TemplateScript.Factory sourceFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "source_field", + sourceField, + scriptService + ); + String targetField = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "target_field"); + TemplateScript.Factory targetFieldTemplate = ConfigurationUtils.compileTemplate( + TYPE, + processorTag, + "target_field", + targetField, + scriptService + ); + boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); + boolean removeSource = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "remove_source", false); + boolean overrideTarget = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override_target", false); + + return new CopyProcessor( + processorTag, + description, + sourceFieldTemplate, + targetFieldTemplate, + ignoreMissing, + removeSource, + overrideTarget + ); + } + } +} diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java index a2a51d968e078..7c1b4841122b0 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/IngestCommonModulePlugin.java @@ -106,6 +106,7 @@ public Map getProcessors(Processor.Parameters paramet processors.put(DropProcessor.TYPE, new DropProcessor.Factory()); processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory()); processors.put(CsvProcessor.TYPE, new CsvProcessor.Factory()); + processors.put(CopyProcessor.TYPE, new CopyProcessor.Factory(parameters.scriptService)); return Collections.unmodifiableMap(processors); } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java new file mode 100644 index 0000000000000..c1ca86a49e334 --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorFactoryTests.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.ingest.common; + +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchParseException; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class CopyProcessorFactoryTests extends OpenSearchTestCase { + + private CopyProcessor.Factory factory; + + @Before + public void init() { + factory = new CopyProcessor.Factory(TestTemplateService.instance()); + } + + public void testCreate() throws Exception { + boolean ignoreMissing = randomBoolean(); + boolean removeSource = randomBoolean(); + boolean overrideTarget = randomBoolean(); + Map config = new HashMap<>(); + config.put("source_field", "source"); + config.put("target_field", "target"); + config.put("ignore_missing", ignoreMissing); + config.put("remove_source", removeSource); + config.put("override_target", overrideTarget); + String processorTag = randomAlphaOfLength(10); + CopyProcessor copyProcessor = factory.create(null, processorTag, null, config); + assertThat(copyProcessor.getTag(), equalTo(processorTag)); + assertThat(copyProcessor.getSourceField().newInstance(Collections.emptyMap()).execute(), equalTo("source")); + assertThat(copyProcessor.getTargetField().newInstance(Collections.emptyMap()).execute(), equalTo("target")); + assertThat(copyProcessor.isIgnoreMissing(), equalTo(ignoreMissing)); + assertThat(copyProcessor.isRemoveSource(), equalTo(removeSource)); + assertThat(copyProcessor.isOverrideTarget(), equalTo(overrideTarget)); + } + + public void testCreateWithSourceField() throws Exception { + Map config = new HashMap<>(); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + + config.put("source_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[source_field] required property is missing")); + } + } + + public void testCreateWithTargetField() throws Exception { + Map config = new HashMap<>(); + config.put("source_field", "source"); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + + config.put("source_field", "source"); + config.put("target_field", null); + try { + factory.create(null, null, null, config); + fail("factory create should have failed"); + } catch (OpenSearchParseException e) { + assertThat(e.getMessage(), equalTo("[target_field] required property is missing")); + } + } + + public void testInvalidMustacheTemplate() throws Exception { + CopyProcessor.Factory factory = new CopyProcessor.Factory(TestTemplateService.instance(true)); + Map config = new HashMap<>(); + config.put("source_field", "{{source}}"); + config.put("target_field", "target"); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows(OpenSearchException.class, () -> factory.create(null, processorTag, null, config)); + assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); + 
assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); + } + +} diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java new file mode 100644 index 0000000000000..f271bdd342d0b --- /dev/null +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/CopyProcessorTests.java @@ -0,0 +1,125 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.ingest.common; + +import org.opensearch.ingest.IngestDocument; +import org.opensearch.ingest.Processor; +import org.opensearch.ingest.RandomDocumentPicks; +import org.opensearch.ingest.TestTemplateService; +import org.opensearch.test.OpenSearchTestCase; + +import static org.hamcrest.Matchers.equalTo; + +public class CopyProcessorTests extends OpenSearchTestCase { + + public void testCopyExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); + assertThat(ingestDocument.getFieldValue(sourceFieldName, Object.class), equalTo(sourceValue)); + + Processor processorWithEmptyTarget = createCopyProcessor(sourceFieldName, "", false, false, false); + assertThrows( + "target field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyTarget.execute(ingestDocument) + ); + + Processor processorWithSameSourceAndTarget = createCopyProcessor(sourceFieldName, sourceFieldName, false, false, false); + assertThrows( + "source field path and target field path cannot be same", + IllegalArgumentException.class, + () -> processorWithSameSourceAndTarget.execute(ingestDocument) + ); + } + + public void testCopyWithIgnoreMissing() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Processor processor = createCopyProcessor("non-existing-field", targetFieldName, false, false, false); + assertThrows( + "source field [non-existing-field] doesn't exist", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + Processor processorWithEmptyFieldName = createCopyProcessor("", targetFieldName, false, false, false); + assertThrows( + "source field path cannot be null nor empty", + IllegalArgumentException.class, + () -> processorWithEmptyFieldName.execute(ingestDocument) + ); + + Processor processorWithIgnoreMissing = createCopyProcessor("non-existing-field", targetFieldName, true, false, false); + processorWithIgnoreMissing.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(false)); + } + + public void testCopyWithRemoveSource() throws Exception { + IngestDocument ingestDocument = 
RandomDocumentPicks.randomIngestDocument(random()); + String sourceFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + String targetFieldName = RandomDocumentPicks.randomFieldName(random()); + Object sourceValue = ingestDocument.getFieldValue(sourceFieldName, Object.class); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, true, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); + assertThat(ingestDocument.hasField(sourceFieldName), equalTo(false)); + } + + public void testCopyToExistingField() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + String targetFieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument); + Object sourceValue = RandomDocumentPicks.randomFieldValue(random()); + String sourceFieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, sourceValue); + + Processor processor = createCopyProcessor(sourceFieldName, targetFieldName, false, false, false); + assertThrows( + "target field [" + targetFieldName + "] already exists", + IllegalArgumentException.class, + () -> processor.execute(ingestDocument) + ); + + // if override_target is false but target field's value is null, copy can execute successfully + String targetFieldWithNullValue = RandomDocumentPicks.addRandomField(random(), ingestDocument, null); + Processor processorWithTargetNullValue = createCopyProcessor(sourceFieldName, targetFieldWithNullValue, false, false, false); + processorWithTargetNullValue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldWithNullValue), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldWithNullValue, Object.class), equalTo(sourceValue)); + + Processor processorWithOverrideTargetIsTrue = createCopyProcessor(sourceFieldName, targetFieldName, false, false, true); + processorWithOverrideTargetIsTrue.execute(ingestDocument); + assertThat(ingestDocument.hasField(targetFieldName), equalTo(true)); + assertThat(ingestDocument.getFieldValue(targetFieldName, Object.class), equalTo(sourceValue)); + } + + private static Processor createCopyProcessor( + String sourceFieldName, + String targetFieldName, + boolean ignoreMissing, + boolean removeSource, + boolean overrideTarget + ) { + return new CopyProcessor( + randomAlphaOfLength(10), + null, + new TestTemplateService.MockTemplateScript.Factory(sourceFieldName), + new TestTemplateService.MockTemplateScript.Factory(targetFieldName), + ignoreMissing, + removeSource, + overrideTarget + ); + } +} diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml index f44cc1f9f9fcf..0719082c887f2 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/10_basic.yml @@ -36,3 +36,20 @@ - contains: { nodes.$cluster_manager.ingest.processors: { type: split } } - contains: { nodes.$cluster_manager.ingest.processors: { type: trim } } - contains: { nodes.$cluster_manager.ingest.processors: { type: uppercase } } + +--- +"Copy processor exists": + - skip: + version: " - 2.11.99" + features: contains + reason: "copy processor was introduced in 2.12.0 and 
contains is a newly added assertion" + - do: + cluster.state: {} + + # Get cluster-manager node id + - set: { cluster_manager_node: cluster_manager } + + - do: + nodes.info: {} + + - contains: { nodes.$cluster_manager.ingest.processors: { type: copy } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml new file mode 100644 index 0000000000000..0203b62ba67d6 --- /dev/null +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/300_copy_processor.yml @@ -0,0 +1,374 @@ +--- +teardown: + - do: + ingest.delete_pipeline: + id: "1" + ignore: 404 + +--- +"Test creat copy processor": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + catch: /\[target\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "source" + } + } + ] + } + - do: + catch: /\[source\_field\] required property is missing/ + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "target_field" : "target" + } + } + ] + } + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "source", + "target_field" : "target", + "ignore_missing" : true, + "remove_source" : true, + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + +--- +"Test copy processor with ignore_missing": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field \[unknown\_field\] doesn\'t exist/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "unknown_field", + "target_field" : "bar", + "ignore_missing" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello" } } + +--- +"Test copy processor with remove_source": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "remove_source" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello" + } + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "hello" } } + +--- +"Test copy processor with override_target": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target 
field \[bar\] already exists/ + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "foo", + "target_field" : "bar", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { foo: "hello", bar: "hello" } } + +--- +"Test copy processor with template snippets": + - skip: + version: " - 2.11.99" + reason: "introduced in 2.12" + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + target: "bar", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /target field path cannot be null nor empty/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}" + } + } + ] + } + - match: { acknowledged: true } + + - do: + catch: /source field path and target field path cannot be same/ + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "foo", + foo: "hello", + bar: "world" + } + + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors": [ + { + "copy" : { + "source_field" : "{{source}}", + "target_field" : "{{target}}", + "override_target" : true + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "1" + body: { + source: "foo", + target: "bar", + foo: "hello", + bar: "world" + } + - do: + get: + index: test + id: 1 + - match: { _source: { source: "foo", target: "bar", foo: "hello", bar: "hello" } } diff --git a/server/src/main/java/org/opensearch/ingest/IngestDocument.java b/server/src/main/java/org/opensearch/ingest/IngestDocument.java index 10e9e64db561e..d975b0014de1f 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/opensearch/ingest/IngestDocument.java @@ -757,7 +757,7 @@ public static Map deepCopyMap(Map source) { return (Map) deepCopy(source); } - private static Object deepCopy(Object value) { + public static Object deepCopy(Object value) { if (value instanceof Map) { Map mapValue = (Map) value; Map copy = new HashMap<>(mapValue.size()); From 517f091dce6a268060772f4b5740b40ce9d6ee22 Mon Sep 17 00:00:00 2001 From: Ashish Date: Wed, 17 Jan 2024 10:14:43 +0530 Subject: [PATCH 77/81] Fix flaky RemoteStoreRefreshListenerTests test #11256 (#11803) Signed-off-by: Ashish Singh --- .../index/shard/RemoteStoreRefreshListenerTests.java | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 811d6a722d0f6..555284e55a0fa 100644 --- 
a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -418,7 +418,7 @@ public void testTrackerData() throws Exception { RemoteStoreRefreshListener listener = tuple.v1(); RemoteStoreStatsTrackerFactory trackerFactory = tuple.v2(); RemoteSegmentTransferTracker tracker = trackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); - assertNoLag(tracker); + assertBusy(() -> assertNoLag(tracker)); indexDocs(100, randomIntBetween(100, 200)); indexShard.refresh("test"); listener.afterRefresh(true); @@ -473,6 +473,14 @@ private Tuple mockIn new InternalEngineFactory() ); + RemoteSegmentTransferTracker tracker = indexShard.getRemoteStoreStatsTrackerFactory() + .getRemoteSegmentTransferTracker(indexShard.shardId()); + try { + assertBusy(() -> assertTrue(tracker.getTotalUploadsSucceeded() > 0)); + } catch (Exception e) { + assert false; + } + indexDocs(1, randomIntBetween(1, 100)); // Mock indexShard.store().directory() @@ -554,7 +562,6 @@ private Tuple mockIn RecoverySettings recoverySettings = mock(RecoverySettings.class); when(recoverySettings.getMinRemoteSegmentMetadataFiles()).thenReturn(10); when(shard.getRecoverySettings()).thenReturn(recoverySettings); - RemoteSegmentTransferTracker tracker = remoteStoreStatsTrackerFactory.getRemoteSegmentTransferTracker(indexShard.shardId()); RemoteStoreRefreshListener refreshListener = new RemoteStoreRefreshListener(shard, emptyCheckpointPublisher, tracker); refreshListener.afterRefresh(true); return Tuple.tuple(refreshListener, remoteStoreStatsTrackerFactory); From 5dd4b61e83d9bbbdfd7fd490de4e8336bcc8e4f8 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Wed, 17 Jan 2024 21:14:10 +0800 Subject: [PATCH 78/81] Remove ingest processor supports excluding fields (#10967) * Remove ingest processor supports field patterns and excluding fields Signed-off-by: Gao Binlong * Format some code Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Remove the code of field pattern Signed-off-by: Gao Binlong * Add skip version in rest test yml Signed-off-by: Gao Binlong * Make fields and exclude_fields mutually exclusive when constructing RemoveProcessor Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../ingest/common/RemoveProcessor.java | 165 +++++++++++++----- .../common/RemoveProcessorFactoryTests.java | 38 ++-- .../ingest/common/RemoveProcessorTests.java | 46 +++++ .../test/ingest/290_remove_processor.yml | 40 +++++ .../opensearch/ingest/ConfigurationUtils.java | 8 + 6 files changed, 244 insertions(+), 54 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1e970d097bfb..d083406d63518 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255)) - Per request phase latency ([#10351](https://github.com/opensearch-project/OpenSearch/issues/10351)) - Add cluster state stats ([#10670](https://github.com/opensearch-project/OpenSearch/pull/10670)) +- Remove ingest processor supports excluding fields ([#10967](https://github.com/opensearch-project/OpenSearch/pull/10967)) - [Tiered caching] Enabling serialization for IndicesRequestCache key object ([#10275](https://github.com/opensearch-project/OpenSearch/pull/10275)) - [Tiered caching] Defining interfaces, listeners and extending 
IndicesRequestCache with Tiered cache support ([#10753](https://github.com/opensearch-project/OpenSearch/pull/10753)) - [Remote cluster state] Restore cluster state version during remote state auto restore ([#10853](https://github.com/opensearch-project/OpenSearch/pull/10853)) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java index a48cfd87b78c3..d01dce02fca31 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/RemoveProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest.common; +import org.opensearch.common.Nullable; import org.opensearch.core.common.Strings; import org.opensearch.index.VersionType; import org.opensearch.ingest.AbstractProcessor; @@ -42,11 +43,15 @@ import org.opensearch.script.TemplateScript; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; + /** * Processor that removes existing fields. Nothing happens if the field is not present. */ @@ -55,11 +60,28 @@ public final class RemoveProcessor extends AbstractProcessor { public static final String TYPE = "remove"; private final List fields; + private final List excludeFields; private final boolean ignoreMissing; - RemoveProcessor(String tag, String description, List fields, boolean ignoreMissing) { + RemoveProcessor( + String tag, + String description, + @Nullable List fields, + @Nullable List excludeFields, + boolean ignoreMissing + ) { super(tag, description); - this.fields = new ArrayList<>(fields); + if (fields == null && excludeFields == null || fields != null && excludeFields != null) { + throw new IllegalArgumentException("ether fields and excludeFields must be set"); + } + if (fields != null) { + this.fields = new ArrayList<>(fields); + this.excludeFields = null; + } else { + this.fields = null; + this.excludeFields = new ArrayList<>(excludeFields); + } + this.ignoreMissing = ignoreMissing; } @@ -67,42 +89,76 @@ public List getFields() { return fields; } + public List getExcludeFields() { + return excludeFields; + } + @Override public IngestDocument execute(IngestDocument document) { - fields.forEach(field -> { - String path = document.renderTemplate(field); - final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); - if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { - if (ignoreMissing) { - return; - } else if (fieldPathIsNullOrEmpty) { - throw new IllegalArgumentException("field path cannot be null nor empty"); - } else { - throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + if (fields != null && !fields.isEmpty()) { + fields.forEach(field -> { + String path = document.renderTemplate(field); + final boolean fieldPathIsNullOrEmpty = Strings.isNullOrEmpty(path); + if (fieldPathIsNullOrEmpty || document.hasField(path) == false) { + if (ignoreMissing) { + return; + } else if (fieldPathIsNullOrEmpty) { + throw new IllegalArgumentException("field path cannot be null nor empty"); + } else { + throw new IllegalArgumentException("field [" + path + "] doesn't exist"); + } } - } - // cannot remove _index, _version and _version_type. 
- if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) - || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) - || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { - throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); - } - // removing _id is disallowed when there's an external version specified in the request - if (path.equals(IngestDocument.Metadata.ID.getFieldName()) - && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { - String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); - if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { - Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); - throw new IllegalArgumentException( - "cannot remove metadata field [_id] when specifying external version for the document, version: " - + version - + ", version_type: " - + versionType - ); + + // cannot remove _index, _version and _version_type. + if (path.equals(IngestDocument.Metadata.INDEX.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION.getFieldName()) + || path.equals(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + throw new IllegalArgumentException("cannot remove metadata field [" + path + "]"); } + // removing _id is disallowed when there's an external version specified in the request + if (path.equals(IngestDocument.Metadata.ID.getFieldName()) + && document.hasField(IngestDocument.Metadata.VERSION_TYPE.getFieldName())) { + String versionType = document.getFieldValue(IngestDocument.Metadata.VERSION_TYPE.getFieldName(), String.class); + if (!Objects.equals(versionType, VersionType.toString(VersionType.INTERNAL))) { + Long version = document.getFieldValue(IngestDocument.Metadata.VERSION.getFieldName(), Long.class, true); + throw new IllegalArgumentException( + "cannot remove metadata field [_id] when specifying external version for the document, version: " + + version + + ", version_type: " + + versionType + ); + } + } + document.removeField(path); + }); + } + + if (excludeFields != null && !excludeFields.isEmpty()) { + Set excludeFieldSet = new HashSet<>(); + excludeFields.forEach(field -> { + String path = document.renderTemplate(field); + // ignore the empty or null field path + if (!Strings.isNullOrEmpty(path)) { + excludeFieldSet.add(path); + } + }); + + if (!excludeFieldSet.isEmpty()) { + Set existingFields = new HashSet<>(document.getSourceAndMetadata().keySet()); + Set metadataFields = document.getMetadata() + .keySet() + .stream() + .map(IngestDocument.Metadata::getFieldName) + .collect(Collectors.toSet()); + existingFields.forEach(field -> { + // ignore metadata fields such as _index, _id, etc. 
+ if (!metadataFields.contains(field) && !excludeFieldSet.contains(field)) { + document.removeField(field); + } + }); } - document.removeField(path); - }); + } + return document; } @@ -127,20 +183,41 @@ public RemoveProcessor create( Map config ) throws Exception { final List fields = new ArrayList<>(); - final Object field = ConfigurationUtils.readObject(TYPE, processorTag, config, "field"); - if (field instanceof List) { - @SuppressWarnings("unchecked") - List stringList = (List) field; - fields.addAll(stringList); - } else { - fields.add((String) field); + final List excludeFields = new ArrayList<>(); + final Object field = ConfigurationUtils.readOptionalObject(config, "field"); + final Object excludeField = ConfigurationUtils.readOptionalObject(config, "exclude_field"); + + if (field == null && excludeField == null || field != null && excludeField != null) { + throw newConfigurationException(TYPE, processorTag, "field", "ether field or exclude_field must be set"); } - final List compiledTemplates = fields.stream() - .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) - .collect(Collectors.toList()); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "ignore_missing", false); - return new RemoveProcessor(processorTag, description, compiledTemplates, ignoreMissing); + + if (field != null) { + if (field instanceof List) { + @SuppressWarnings("unchecked") + List stringList = (List) field; + fields.addAll(stringList); + } else { + fields.add((String) field); + } + List fieldCompiledTemplates = fields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, fieldCompiledTemplates, null, ignoreMissing); + } else { + if (excludeField instanceof List) { + @SuppressWarnings("unchecked") + List stringList = (List) excludeField; + excludeFields.addAll(stringList); + } else { + excludeFields.add((String) excludeField); + } + List excludeFieldCompiledTemplates = excludeFields.stream() + .map(f -> ConfigurationUtils.compileTemplate(TYPE, processorTag, "exclude_field", f, scriptService)) + .collect(Collectors.toList()); + return new RemoveProcessor(processorTag, description, null, excludeFieldCompiledTemplates, ignoreMissing); + } } } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java index 66ca888a0d39f..179aef2feac0c 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorFactoryTests.java @@ -41,6 +41,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -79,16 +80,6 @@ public void testCreateMultipleFields() throws Exception { ); } - public void testCreateMissingField() throws Exception { - Map config = new HashMap<>(); - try { - factory.create(null, null, null, config); - fail("factory create should have failed"); - } catch (OpenSearchParseException e) { - assertThat(e.getMessage(), equalTo("[field] required property is missing")); - } - } - public void testInvalidMustacheTemplate() throws Exception { RemoveProcessor.Factory factory = new 
RemoveProcessor.Factory(TestTemplateService.instance(true)); Map config = new HashMap<>(); @@ -98,4 +89,31 @@ public void testInvalidMustacheTemplate() throws Exception { assertThat(exception.getMessage(), equalTo("java.lang.RuntimeException: could not compile script")); assertThat(exception.getMetadata("opensearch.processor_tag").get(0), equalTo(processorTag)); } + + public void testCreateWithExcludeField() throws Exception { + Map config = new HashMap<>(); + String processorTag = randomAlphaOfLength(10); + OpenSearchException exception = expectThrows( + OpenSearchParseException.class, + () -> factory.create(null, processorTag, null, config) + ); + assertThat(exception.getMessage(), equalTo("[field] ether field or exclude_field must be set")); + + Map config2 = new HashMap<>(); + config2.put("field", "field1"); + config2.put("exclude_field", "field2"); + exception = expectThrows(OpenSearchParseException.class, () -> factory.create(null, processorTag, null, config2)); + assertThat(exception.getMessage(), equalTo("[field] ether field or exclude_field must be set")); + + Map config6 = new HashMap<>(); + config6.put("exclude_field", "exclude_field"); + RemoveProcessor removeProcessor = factory.create(null, processorTag, null, config6); + assertThat( + removeProcessor.getExcludeFields() + .stream() + .map(template -> template.newInstance(Collections.emptyMap()).execute()) + .collect(Collectors.toList()), + equalTo(List.of("exclude_field")) + ); + } } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java index c138ad606d2e5..78a3d36124d45 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/RemoveProcessorTests.java @@ -38,8 +38,10 @@ import org.opensearch.ingest.Processor; import org.opensearch.ingest.RandomDocumentPicks; import org.opensearch.ingest.TestTemplateService; +import org.opensearch.script.TemplateScript; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -57,12 +59,28 @@ public void testRemoveFields() throws Exception { randomAlphaOfLength(10), null, Collections.singletonList(new TestTemplateService.MockTemplateScript.Factory(field)), + null, false ); processor.execute(ingestDocument); assertThat(ingestDocument.hasField(field), equalTo(false)); } + public void testRemoveByExcludeFields() throws Exception { + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + ingestDocument.setFieldValue("foo_1", "value"); + ingestDocument.setFieldValue("foo_2", "value"); + ingestDocument.setFieldValue("foo_3", "value"); + List excludeFields = new ArrayList<>(); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + excludeFields.add(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + Processor processor = new RemoveProcessor(randomAlphaOfLength(10), null, null, excludeFields, false); + processor.execute(ingestDocument); + assertThat(ingestDocument.hasField("foo_1"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_2"), equalTo(true)); + assertThat(ingestDocument.hasField("foo_3"), equalTo(false)); + } + public void testRemoveNonExistingField() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new 
HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); @@ -183,6 +201,34 @@ public void testRemoveMetadataField() throws Exception { } } + public void testCreateRemoveProcessorWithBothFieldsAndExcludeFields() throws Exception { + assertThrows( + "ether fields and excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, null, null, false) + ); + + final List fields; + if (randomBoolean()) { + fields = new ArrayList<>(); + } else { + fields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_1")); + } + + final List excludeFields; + if (randomBoolean()) { + excludeFields = new ArrayList<>(); + } else { + excludeFields = List.of(new TestTemplateService.MockTemplateScript.Factory("foo_2")); + } + + assertThrows( + "ether fields and excludeFields must be set", + IllegalArgumentException.class, + () -> new RemoveProcessor(randomAlphaOfLength(10), null, fields, excludeFields, false) + ); + } + public void testRemoveDocumentId() throws Exception { Map config = new HashMap<>(); config.put("field", IngestDocument.Metadata.ID.getFieldName()); diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml index 6668b468f8edc..e120a865052b0 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/290_remove_processor.yml @@ -319,3 +319,43 @@ teardown: } - match: { docs.0.error.type: "illegal_argument_exception" } - match: { docs.0.error.reason: "cannot remove metadata field [_id] when specifying external version for the document, version: 1, version_type: external_gte" } + +# Related issue: https://github.com/opensearch-project/OpenSearch/issues/1578 +--- +"Test remove processor with exclude_field": + - skip: + version: " - 2.11.99" + reason: "exclude_field is introduced in 2.12" + - do: + ingest.put_pipeline: + id: "my_pipeline" + body: > + { + "description": "_description", + "processors": [ + { + "remove" : { + "exclude_field": "bar" + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: 1 + pipeline: "my_pipeline" + body: { + foo1: "bar", + foo2: "bar", + bar: "zoo", + zoo: "bar" + } + + - do: + get: + index: test + id: 1 + - match: { _source: { bar: "zoo"}} diff --git a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java index 5185b740d90cb..a2c2137130587 100644 --- a/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/opensearch/ingest/ConfigurationUtils.java @@ -387,6 +387,7 @@ private static Map readMap(String processorType, String processor /** * Returns and removes the specified property as an {@link Object} from the specified configuration map. 
+ * If the property is missing an {@link OpenSearchParseException} is thrown */ public static Object readObject(String processorType, String processorTag, Map configuration, String propertyName) { Object value = configuration.remove(propertyName); @@ -396,6 +397,13 @@ public static Object readObject(String processorType, String processorTag, Map configuration, String propertyName) { + return configuration.remove(propertyName); + } + public static OpenSearchException newConfigurationException( String processorType, String processorTag, From d5a6f99d9c8c3e34628e4807d351001647f235c5 Mon Sep 17 00:00:00 2001 From: bowenlan-amzn Date: Wed, 17 Jan 2024 09:29:52 -0800 Subject: [PATCH 79/81] Apply the fast filter optimization to composite aggregation (#11505) We can do efficient DateHistogram aggregation under composite aggregations. --------- Signed-off-by: bowenlan-amzn --- CHANGELOG.md | 1 + .../bucket/FastFilterRewriteHelper.java | 385 ++++++++++++++++++ .../bucket/composite/CompositeAggregator.java | 98 ++++- .../bucket/composite/CompositeKey.java | 6 +- .../CompositeValuesCollectorQueue.java | 30 +- .../CompositeValuesSourceConfig.java | 2 +- .../DateHistogramValuesSourceBuilder.java | 2 +- .../bucket/composite/InternalComposite.java | 6 +- .../composite/PointsSortedDocsProducer.java | 6 +- .../composite/RoundingValuesSource.java | 24 +- .../AutoDateHistogramAggregator.java | 67 ++- .../histogram/DateHistogramAggregator.java | 60 ++- .../bucket/histogram/FilterRewriteHelper.java | 259 ------------ .../composite/CompositeAggregatorTests.java | 17 +- 14 files changed, 585 insertions(+), 378 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java delete mode 100644 server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java diff --git a/CHANGELOG.md b/CHANGELOG.md index d083406d63518..ea96035807276 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -188,6 +188,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Performance improvement for MultiTerm Queries on Keyword fields ([#7057](https://github.com/opensearch-project/OpenSearch/issues/7057)) - Refactor common parts from the Rounding class into a separate 'round' package ([#11023](https://github.com/opensearch-project/OpenSearch/issues/11023)) - Performance improvement for date histogram aggregations without sub-aggregations ([#11083](https://github.com/opensearch-project/OpenSearch/pull/11083)) +- Apply the fast filter optimization to composite aggregation of date histogram source ([#11505](https://github.com/opensearch-project/OpenSearch/pull/11083)) - Disable concurrent aggs for Diversified Sampler and Sampler aggs ([#11087](https://github.com/opensearch-project/OpenSearch/issues/11087)) - Made leader/follower check timeout setting dynamic ([#10528](https://github.com/opensearch-project/OpenSearch/pull/10528)) - Improved performance of numeric exact-match queries ([#11209](https://github.com/opensearch-project/OpenSearch/pull/11209)) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java new file mode 100644 index 0000000000000..f377287d0b3bd --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/FastFilterRewriteHelper.java @@ -0,0 +1,385 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.bucket; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.IndexOrDocValuesQuery; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Rounding; +import org.opensearch.common.lucene.search.function.FunctionScoreQuery; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.query.DateRangeIncludingNowQuery; +import org.opensearch.search.DocValueFormat; +import org.opensearch.search.aggregations.bucket.composite.CompositeKey; +import org.opensearch.search.aggregations.bucket.composite.CompositeValuesSourceConfig; +import org.opensearch.search.aggregations.bucket.composite.RoundingValuesSource; +import org.opensearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.OptionalLong; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * Utility class to help rewrite aggregations into filters. + * Instead of aggregation collects documents one by one, filter may count all documents that match in one pass. + *

+ * Currently supported rewrite:
+ * <ul>
+ *   <li>date histogram : date range filter.
+ *   Applied: DateHistogramAggregator, AutoDateHistogramAggregator, CompositeAggregator</li>
+ * </ul>
+ * + * @opensearch.internal + */ +public final class FastFilterRewriteHelper { + + private FastFilterRewriteHelper() {} + + private static final int MAX_NUM_FILTER_BUCKETS = 1024; + private static final Map, Function> queryWrappers; + + // Initialize the wrapper map for unwrapping the query + static { + queryWrappers = new HashMap<>(); + queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery()); + queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); + queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); + queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); + } + + /** + * Recursively unwraps query into the concrete form + * for applying the optimization + */ + private static Query unwrapIntoConcreteQuery(Query query) { + while (queryWrappers.containsKey(query.getClass())) { + query = queryWrappers.get(query.getClass()).apply(query); + } + + return query; + } + + /** + * Finds the min and max bounds of field values for the shard + */ + private static long[] getIndexBounds(final SearchContext context, final String fieldName) throws IOException { + final List leaves = context.searcher().getIndexReader().leaves(); + long min = Long.MAX_VALUE, max = Long.MIN_VALUE; + // Since the query does not specify bounds for aggregation, we can + // build the global min/max from local min/max within each segment + for (LeafReaderContext leaf : leaves) { + final PointValues values = leaf.reader().getPointValues(fieldName); + if (values != null) { + min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); + max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); + } + } + + if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) return null; + + return new long[] { min, max }; + } + + /** + * This method also acts as a pre-condition check for the optimization, + * returns null if the optimization cannot be applied + */ + public static long[] getAggregationBounds(final SearchContext context, final String fieldName) throws IOException { + final Query cq = unwrapIntoConcreteQuery(context.query()); + final long[] indexBounds = getIndexBounds(context, fieldName); + if (cq instanceof PointRangeQuery) { + final PointRangeQuery prq = (PointRangeQuery) cq; + // Ensure that the query and aggregation are on the same field + if (prq.getField().equals(fieldName)) { + return new long[] { + // Minimum bound for aggregation is the max between query and global + Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]), + // Maximum bound for aggregation is the min between query and global + Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]) }; + } + } else if (cq instanceof MatchAllDocsQuery) { + return indexBounds; + } + // Check if the top-level query (which may be a PRQ on another field) is functionally match-all + Weight weight = context.searcher().createWeight(context.query(), ScoreMode.COMPLETE_NO_SCORES, 1f); + for (LeafReaderContext ctx : context.searcher().getIndexReader().leaves()) { + if (weight.count(ctx) != ctx.reader().numDocs()) { + return null; + } + } + return indexBounds; + } + + /** + * Creates the date range filters for aggregations using the interval, min/max + * bounds and the rounding values + */ + private static Weight[] createFilterForAggregations( + final SearchContext context, + final long interval, + final Rounding.Prepared 
preparedRounding, + final String field, + final DateFieldMapper.DateFieldType fieldType, + long low, + final long high + ) throws IOException { + // Calculate the number of buckets using range and interval + long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + long prevRounded = roundedLow; + int bucketCount = 0; + while (roundedLow <= fieldType.convertNanosToMillis(high)) { + bucketCount++; + if (bucketCount > MAX_NUM_FILTER_BUCKETS) return null; + // Below rounding is needed as the interval could return in + // non-rounded values for something like calendar month + roundedLow = preparedRounding.round(roundedLow + interval); + if (prevRounded == roundedLow) break; // prevents getting into an infinite loop + prevRounded = roundedLow; + } + + Weight[] filters = null; + if (bucketCount > 0) { + filters = new Weight[bucketCount]; + roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); + + int i = 0; + while (i < bucketCount) { + // Calculate the lower bucket bound + final byte[] lower = new byte[8]; + NumericUtils.longToSortableBytes(i == 0 ? low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0); + + // Calculate the upper bucket bound + roundedLow = preparedRounding.round(roundedLow + interval); + final byte[] upper = new byte[8]; + NumericUtils.longToSortableBytes(i + 1 == bucketCount ? high : + // Subtract -1 if the minimum is roundedLow as roundedLow itself + // is included in the next bucket + fieldType.convertRoundedMillisToNanos(roundedLow) - 1, upper, 0); + + filters[i++] = context.searcher().createWeight(new PointRangeQuery(field, lower, upper, 1) { + @Override + protected String toString(int dimension, byte[] value) { + return null; + } + }, ScoreMode.COMPLETE_NO_SCORES, 1); + } + } + + return filters; + } + + /** + * Context object to do fast filter optimization + */ + public static class FastFilterContext { + private Weight[] filters = null; + public AggregationType aggregationType; + + public FastFilterContext() {} + + private void setFilters(Weight[] filters) { + this.filters = filters; + } + + public void setAggregationType(AggregationType aggregationType) { + this.aggregationType = aggregationType; + } + + public boolean isRewriteable(final Object parent, final int subAggLength) { + return aggregationType.isRewriteable(parent, subAggLength); + } + + /** + * This filter build method is for date histogram aggregation type + * + * @param computeBounds get the lower and upper bound of the field in a shard search + * @param roundingFunction produce Rounding that contains interval of date range. 
+ * Rounding is computed dynamically using the bounds in AutoDateHistogram + * @param preparedRoundingSupplier produce PreparedRounding to round values at call-time + */ + public void buildFastFilter( + SearchContext context, + CheckedFunction computeBounds, + Function roundingFunction, + Supplier preparedRoundingSupplier + ) throws IOException { + assert this.aggregationType instanceof DateHistogramAggregationType; + DateHistogramAggregationType aggregationType = (DateHistogramAggregationType) this.aggregationType; + DateFieldMapper.DateFieldType fieldType = aggregationType.getFieldType(); + final long[] bounds = computeBounds.apply(aggregationType); + if (bounds == null) return; + + final Rounding rounding = roundingFunction.apply(bounds); + final OptionalLong intervalOpt = Rounding.getInterval(rounding); + if (intervalOpt.isEmpty()) return; + final long interval = intervalOpt.getAsLong(); + + // afterKey is the last bucket key in previous response, while the bucket key + // is the start of the bucket values, so add the interval + if (aggregationType instanceof CompositeAggregationType && ((CompositeAggregationType) aggregationType).afterKey != -1) { + bounds[0] = ((CompositeAggregationType) aggregationType).afterKey + interval; + } + + final Weight[] filters = FastFilterRewriteHelper.createFilterForAggregations( + context, + interval, + preparedRoundingSupplier.get(), + fieldType.name(), + fieldType, + bounds[0], + bounds[1] + ); + this.setFilters(filters); + } + } + + /** + * Different types have different pre-conditions, filter building logic, etc. + */ + public interface AggregationType { + boolean isRewriteable(Object parent, int subAggLength); + } + + /** + * For date histogram aggregation + */ + public static class DateHistogramAggregationType implements AggregationType { + private final MappedFieldType fieldType; + private final boolean missing; + private final boolean hasScript; + + public DateHistogramAggregationType(MappedFieldType fieldType, boolean missing, boolean hasScript) { + this.fieldType = fieldType; + this.missing = missing; + this.hasScript = hasScript; + } + + @Override + public boolean isRewriteable(Object parent, int subAggLength) { + if (parent == null && subAggLength == 0 && !missing && !hasScript) { + return fieldType != null && fieldType instanceof DateFieldMapper.DateFieldType; + } + return false; + } + + public DateFieldMapper.DateFieldType getFieldType() { + assert fieldType instanceof DateFieldMapper.DateFieldType; + return (DateFieldMapper.DateFieldType) fieldType; + } + } + + /** + * For composite aggregation with date histogram as a source + */ + public static class CompositeAggregationType extends DateHistogramAggregationType { + private final RoundingValuesSource valuesSource; + private long afterKey = -1L; + private final int size; + + public CompositeAggregationType( + CompositeValuesSourceConfig[] sourceConfigs, + CompositeKey rawAfterKey, + List formats, + int size + ) { + super(sourceConfigs[0].fieldType(), sourceConfigs[0].missingBucket(), sourceConfigs[0].hasScript()); + this.valuesSource = (RoundingValuesSource) sourceConfigs[0].valuesSource(); + this.size = size; + if (rawAfterKey != null) { + assert rawAfterKey.size() == 1 && formats.size() == 1; + this.afterKey = formats.get(0).parseLong(rawAfterKey.get(0).toString(), false, () -> { + throw new IllegalArgumentException("now() is not supported in [after] key"); + }); + } + } + + public Rounding getRounding() { + return valuesSource.getRounding(); + } + + public Rounding.Prepared 
getRoundingPreparer() { + return valuesSource.getPreparedRounding(); + } + } + + public static boolean isCompositeAggRewriteable(CompositeValuesSourceConfig[] sourceConfigs) { + return sourceConfigs.length == 1 && sourceConfigs[0].valuesSource() instanceof RoundingValuesSource; + } + + public static long getBucketOrd(long bucketOrd) { + if (bucketOrd < 0) { // already seen + bucketOrd = -1 - bucketOrd; + } + + return bucketOrd; + } + + /** + * This is executed for each segment by passing the leaf reader context + * + * @param incrementDocCount takes in the bucket key value and the bucket count + */ + public static boolean tryFastFilterAggregation( + final LeafReaderContext ctx, + FastFilterContext fastFilterContext, + final BiConsumer incrementDocCount + ) throws IOException { + if (fastFilterContext == null) return false; + if (fastFilterContext.filters == null) return false; + + final Weight[] filters = fastFilterContext.filters; + final int[] counts = new int[filters.length]; + int i; + for (i = 0; i < filters.length; i++) { + counts[i] = filters[i].count(ctx); + if (counts[i] == -1) { + // Cannot use the optimization if any of the counts + // is -1 indicating the segment might have deleted documents + return false; + } + } + + int s = 0; + int size = Integer.MAX_VALUE; + for (i = 0; i < filters.length; i++) { + if (counts[i] > 0) { + long bucketKey = i; // the index of filters is the key for filters aggregation + if (fastFilterContext.aggregationType instanceof DateHistogramAggregationType) { + final DateFieldMapper.DateFieldType fieldType = ((DateHistogramAggregationType) fastFilterContext.aggregationType) + .getFieldType(); + bucketKey = fieldType.convertNanosToMillis( + NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) + ); + if (fastFilterContext.aggregationType instanceof CompositeAggregationType) { + size = ((CompositeAggregationType) fastFilterContext.aggregationType).size; + } + } + incrementDocCount.accept(bucketKey, counts[i]); + s++; + if (s > size) return true; + } + } + + return true; + } +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 317c2a357bac5..822b8a6c4b118 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -56,7 +56,9 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.comparators.LongComparator; import org.apache.lucene.util.Bits; +import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.RoaringDocIdSet; +import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; import org.opensearch.index.IndexSortConfig; import org.opensearch.lucene.queries.SearchAfterSortedDocQuery; @@ -71,7 +73,9 @@ import org.opensearch.search.aggregations.MultiBucketCollector; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.missing.MissingOrder; +import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.searchafter.SearchAfterBuilder; 
import org.opensearch.search.sort.SortAndFormats; @@ -80,6 +84,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.LongUnaryOperator; @@ -111,6 +116,10 @@ final class CompositeAggregator extends BucketsAggregator { private boolean earlyTerminated; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + private LongKeyedBucketOrds bucketOrds = null; + private Rounding.Prepared preparedRounding = null; + CompositeAggregator( String name, AggregatorFactories factories, @@ -154,12 +163,33 @@ final class CompositeAggregator extends BucketsAggregator { } this.queue = new CompositeValuesCollectorQueue(context.bigArrays(), sources, size, rawAfterKey); this.rawAfterKey = rawAfterKey; + + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + if (!FastFilterRewriteHelper.isCompositeAggRewriteable(sourceConfigs)) return; + fastFilterContext.setAggregationType( + new FastFilterRewriteHelper.CompositeAggregationType(sourceConfigs, rawAfterKey, formats, size) + ); + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + // bucketOrds is the data structure for saving date histogram results + bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), CardinalityUpperBound.ONE); + // Currently the filter rewrite is only supported for date histograms + FastFilterRewriteHelper.CompositeAggregationType aggregationType = + (FastFilterRewriteHelper.CompositeAggregationType) fastFilterContext.aggregationType; + preparedRounding = aggregationType.getRoundingPreparer(); + fastFilterContext.buildFastFilter( + context, + fc -> FastFilterRewriteHelper.getAggregationBounds(context, fc.getFieldType().name()), + x -> aggregationType.getRounding(), + () -> preparedRounding + ); + } } @Override protected void doClose() { try { Releasables.close(queue); + Releasables.close(bucketOrds); } finally { Releasables.close(sources); } @@ -187,12 +217,14 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } int num = Math.min(size, queue.size()); - final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num]; + long[] bucketOrdsToCollect = new long[queue.size()]; for (int i = 0; i < queue.size(); i++) { bucketOrdsToCollect[i] = i; } InternalAggregations[] subAggsForBuckets = buildSubAggsForBuckets(bucketOrdsToCollect); + while (queue.size() > 0) { int slot = queue.pop(); CompositeKey key = queue.toCompositeKey(slot); @@ -208,6 +240,43 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I aggs ); } + + // Build results from fast filters optimization + if (bucketOrds != null) { + // CompositeKey is the value of bucket key + final Map bucketMap = new HashMap<>(); + // Some segments may not be optimized, so buckets may contain results from the queue. 
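As context for the result-building change in this hunk (combining buckets produced by the fast filter path with buckets from the normal queue path): a tiny standalone sketch of merging two key-to-count maps while keeping the keys sorted, which is the shape of the merge performed here with CompositeKey and introSort. The literal keys and counts below are made up for illustration.

    import java.util.Map;
    import java.util.TreeMap;

    public class MergeCountsSketch {
        public static void main(String[] args) {
            // Counts from the document-by-document (queue) path: bucket key -> doc count.
            Map<Long, Long> collected = Map.of(1474329600000L, 2L, 1508457600000L, 1L);
            // Counts from the per-segment range-filter path.
            Map<Long, Long> fastFiltered = Map.of(1508457600000L, 3L, 1508544000000L, 4L);

            Map<Long, Long> merged = new TreeMap<>(collected); // TreeMap keeps bucket keys in order
            fastFiltered.forEach((key, count) -> merged.merge(key, count, Long::sum));
            System.out.println(merged); // {1474329600000=2, 1508457600000=4, 1508544000000=4}
        }
    }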
+ for (InternalComposite.InternalBucket internalBucket : buckets) { + bucketMap.put(internalBucket.getRawKey(), internalBucket); + } + // Loop over the buckets in the bucketOrds, and populate the map accordingly + LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(0); + while (ordsEnum.next()) { + Long bucketKeyValue = ordsEnum.value(); + CompositeKey key = new CompositeKey(bucketKeyValue); + if (bucketMap.containsKey(key)) { + long docCount = bucketDocCount(ordsEnum.ord()) + bucketMap.get(key).getDocCount(); + bucketMap.get(key).setDocCount(docCount); + } else { + InternalComposite.InternalBucket bucket = new InternalComposite.InternalBucket( + sourceNames, + formats, + key, + reverseMuls, + missingOrders, + bucketDocCount(ordsEnum.ord()), + buildEmptySubAggregations() + ); + bucketMap.put(key, bucket); + } + } + // since a map is not sorted structure, sort it before transform back to buckets + List bucketList = new ArrayList<>(bucketMap.values()); + CollectionUtil.introSort(bucketList, InternalComposite.InternalBucket::compareKey); + buckets = bucketList.subList(0, Math.min(size, bucketList.size())).toArray(InternalComposite.InternalBucket[]::new); + num = buckets.length; + } + CompositeKey lastBucket = num > 0 ? buckets[num - 1].getRawKey() : null; return new InternalAggregation[] { new InternalComposite( @@ -296,7 +365,7 @@ private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException if (indexSortField.getReverse() != (source.reverseMul == -1)) { if (i == 0) { - // the leading index sort matches the leading source field but the order is reversed + // the leading index sort matches the leading source field, but the order is reversed, // so we don't check the other sources. return new Sort(indexSortField); } @@ -304,8 +373,8 @@ private Sort buildIndexSortPrefix(LeafReaderContext context) throws IOException } sortFields.add(indexSortField); if (sourceConfig.valuesSource() instanceof RoundingValuesSource) { - // the rounding "squashes" many values together, that breaks the ordering of sub-values - // so we ignore subsequent source even if they match the index sort. + // the rounding "squashes" many values together, that breaks the ordering of sub-values, + // so we ignore the subsequent sources even if they match the index sort. break; } } @@ -448,6 +517,16 @@ private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) t @Override protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + finishLeaf(); boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR; @@ -477,9 +556,10 @@ protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucket docIdSetBuilder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc()); } if (rawAfterKey != null && sortPrefixLen > 0) { - // We have an after key and index sort is applicable so we jump directly to the doc - // that is after the index sort prefix using the rawAfterKey and we start collecting - // document from there. + // We have an after key and index sort is applicable, so we jump directly to the doc + // after the index sort prefix using the rawAfterKey and we start collecting + // documents from there. 
+ assert indexSortPrefix != null; processLeafFromQuery(ctx, indexSortPrefix); throw new CollectionTerminatedException(); } else { @@ -507,6 +587,8 @@ public void collect(int doc, long bucket) throws IOException { try { long docCount = docCountProvider.getDocCount(doc); if (queue.addIfCompetitive(indexSortPrefix, docCount)) { + // one doc may contain multiple values, we iterate over and collect one by one + // so the same doc can appear multiple times here if (builder != null && lastDoc != doc) { builder.add(doc); lastDoc = doc; @@ -569,7 +651,7 @@ private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollec @Override public void collect(int doc, long zeroBucket) throws IOException { assert zeroBucket == 0; - Integer slot = queue.compareCurrent(); + Integer slot = queue.getCurrentSlot(); if (slot != null) { // The candidate key is a top bucket. // We can defer the collection of this document/bucket to the sub collector diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java index 5ddeb22d33a6f..338ebdc66eef7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeKey.java @@ -44,7 +44,7 @@ * * @opensearch.internal */ -class CompositeKey implements Writeable { +public class CompositeKey implements Writeable { private final Comparable[] values; CompositeKey(Comparable... values) { @@ -64,11 +64,11 @@ Comparable[] values() { return values; } - int size() { + public int size() { return values.length; } - Comparable get(int pos) { + public Comparable get(int pos) { assert pos < values.length; return values[pos]; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java index 6ee1682a7b196..2c4d451322bca 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesCollectorQueue.java @@ -47,6 +47,8 @@ /** * A specialized {@link PriorityQueue} implementation for composite buckets. + * Can think of this as a max heap that holds the top small buckets slots in order. + * Each slot holds the values of the composite bucket key it represents. * * @opensearch.internal */ @@ -77,7 +79,7 @@ public int hashCode() { private final BigArrays bigArrays; private final int maxSize; - private final Map map; + private final Map map; // to quickly find the slot for a value private final SingleDimensionValuesSource[] arrays; private LongArray docCounts; @@ -108,7 +110,7 @@ public int hashCode() { @Override protected boolean lessThan(Integer a, Integer b) { - return compare(a, b) > 0; + return compare(a, b) > 0; // max heap } /** @@ -119,10 +121,10 @@ boolean isFull() { } /** - * Compares the current candidate with the values in the queue and returns + * Try to get the slot of the current/candidate values in the queue and returns * the slot if the candidate is already in the queue or null if the candidate is not present. 
*/ - Integer compareCurrent() { + Integer getCurrentSlot() { return map.get(new Slot(CANDIDATE_SLOT)); } @@ -281,32 +283,34 @@ boolean addIfCompetitive(long inc) { */ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { // checks if the candidate key is competitive - Integer topSlot = compareCurrent(); - if (topSlot != null) { + Integer curSlot = getCurrentSlot(); + if (curSlot != null) { // this key is already in the top N, skip it - docCounts.increment(topSlot, inc); + docCounts.increment(curSlot, inc); return true; } + if (afterKeyIsSet) { int cmp = compareCurrentWithAfter(); if (cmp <= 0) { if (indexSortSourcePrefix < 0 && cmp == indexSortSourcePrefix) { - // the leading index sort is in the reverse order of the leading source + // the leading index sort is and the leading source order are both reversed, // so we can early terminate when we reach a document that is smaller // than the after key (collected on a previous page). throw new CollectionTerminatedException(); } - // key was collected on a previous page, skip it (>= afterKey). + // the key was collected on a previous page, skip it. return false; } } + + // the heap is full, check if the candidate key larger than max heap top if (size() >= maxSize) { - // the tree map is full, check if the candidate key should be kept int cmp = compare(CANDIDATE_SLOT, top()); if (cmp > 0) { if (cmp <= indexSortSourcePrefix) { - // index sort guarantees that there is no key greater or equal than the - // current one in the subsequent documents so we can early terminate. + // index sort guarantees the following documents will have a key larger than the current candidate, + // so we can early terminate. throw new CollectionTerminatedException(); } // the candidate key is not competitive, skip it. @@ -324,7 +328,7 @@ boolean addIfCompetitive(int indexSortSourcePrefix, long inc) { } else { newSlot = size(); } - // move the candidate key to its new slot + // move the candidate key to its new slot by copy its values to the new slot copyCurrent(newSlot, inc); map.put(new Slot(newSlot), newSlot); add(newSlot); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index 788a4ddc15374..5289b3a34ab34 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -156,7 +156,7 @@ public MissingOrder missingOrder() { /** * Returns true if the source contains a script that can change the value. 
*/ - protected boolean hasScript() { + public boolean hasScript() { return hasScript; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index fd94ba355238a..3926ce9bbecb7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -298,7 +298,7 @@ public static void register(ValuesSourceRegistry.Builder builder) { // TODO once composite is plugged in to the values source registry or at least understands Date values source types use it // here Rounding.Prepared preparedRounding = rounding.prepareForUnknown(); - RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding); + RoundingValuesSource vs = new RoundingValuesSource(numeric, preparedRounding, rounding); // is specified in the builder. final DocValueFormat docValueFormat = format == null ? DocValueFormat.RAW : valuesSourceConfig.format(); final MappedFieldType fieldType = valuesSourceConfig.fieldType(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java index 9f8a4cff5f3fc..43f1ad32a66f4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/InternalComposite.java @@ -339,7 +339,7 @@ public static class InternalBucket extends InternalMultiBucketAggregation.Intern KeyComparable { private final CompositeKey key; - private final long docCount; + private long docCount; private final InternalAggregations aggregations; private final transient int[] reverseMuls; private final transient MissingOrder[] missingOrders; @@ -436,6 +436,10 @@ public long getDocCount() { return docCount; } + public void setDocCount(long docCount) { + this.docCount = docCount; + } + @Override public Aggregations getAggregations() { return aggregations; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java index 3d6730203b6ae..dc130eb54c0ea 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/PointsSortedDocsProducer.java @@ -68,6 +68,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade // no value for the field return DocIdSet.EMPTY; } + long lowerBucket = Long.MIN_VALUE; Comparable lowerValue = queue.getLowerValueLeadSource(); if (lowerValue != null) { @@ -76,7 +77,6 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } lowerBucket = (Long) lowerValue; } - long upperBucket = Long.MAX_VALUE; Comparable upperValue = queue.getUpperValueLeadSource(); if (upperValue != null) { @@ -85,6 +85,7 @@ DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue, LeafReade } upperBucket = (Long) upperValue; } + DocIdSetBuilder builder = fillDocIdSet ? 
new DocIdSetBuilder(context.reader().maxDoc(), values, field) : null; Visitor visitor = new Visitor(context, queue, builder, values.getBytesPerDimension(), lowerBucket, upperBucket); try { @@ -146,6 +147,7 @@ public void visit(int docID, byte[] packedValue) throws IOException { } long bucket = bucketFunction.applyAsLong(packedValue); + // process previous bucket when new bucket appears if (first == false && bucket != lastBucket) { final DocIdSet docIdSet = bucketDocsBuilder.build(); if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) && @@ -182,13 +184,13 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue return PointValues.Relation.CELL_OUTSIDE_QUERY; } } - if (upperBucket != Long.MAX_VALUE) { long minBucket = bucketFunction.applyAsLong(minPackedValue); if (minBucket > upperBucket) { return PointValues.Relation.CELL_OUTSIDE_QUERY; } } + return PointValues.Relation.CELL_CROSSES_QUERY; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 89315724ff9ed..3f5cf919f1755 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -47,17 +47,19 @@ * * @opensearch.internal */ -class RoundingValuesSource extends ValuesSource.Numeric { +public class RoundingValuesSource extends ValuesSource.Numeric { private final ValuesSource.Numeric vs; - private final Rounding.Prepared rounding; + private final Rounding.Prepared preparedRounding; + private final Rounding rounding; /** - * - * @param vs The original values source - * @param rounding How to round the values + * @param vs The original values source + * @param preparedRounding How to round the values + * @param rounding The rounding strategy */ - RoundingValuesSource(Numeric vs, Rounding.Prepared rounding) { + RoundingValuesSource(Numeric vs, Rounding.Prepared preparedRounding, Rounding rounding) { this.vs = vs; + this.preparedRounding = preparedRounding; this.rounding = rounding; } @@ -71,8 +73,16 @@ public boolean isBigInteger() { return false; } + public Rounding.Prepared getPreparedRounding() { + return preparedRounding; + } + + public Rounding getRounding() { + return rounding; + } + public long round(long value) { - return rounding.round(value); + return preparedRounding.round(value); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index a71c15d551927..0ea820abbedf4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -33,8 +33,8 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.Prepared; @@ -42,7 +42,6 @@ import org.opensearch.common.util.IntArray; import 
org.opensearch.common.util.LongArray; import org.opensearch.core.common.util.ByteArray; -import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -53,6 +52,7 @@ import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.DeferableBucketAggregator; import org.opensearch.search.aggregations.bucket.DeferringBucketCollector; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.MergingBucketsDeferringCollector; import org.opensearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; @@ -127,14 +127,14 @@ static AutoDateHistogramAggregator build( * {@link MergingBucketsDeferringCollector#mergeBuckets(long[])}. */ private MergingBucketsDeferringCollector deferringCollector; - private final Weight[] filters; - private final DateFieldMapper.DateFieldType fieldType; protected final RoundingInfo[] roundingInfos; protected final int targetBuckets; protected int roundingIdx; protected Rounding.Prepared preparedRounding; + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; + private AutoDateHistogramAggregator( String name, AggregatorFactories factories, @@ -156,23 +156,23 @@ private AutoDateHistogramAggregator( this.roundingPreparer = roundingPreparer; this.preparedRounding = prepareRounding(0); - FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext( - parent(), - subAggregators.length, - context, - b -> getMinimumRounding(b[0], b[1]), - // Passing prepared rounding as supplier to ensure the correct prepared - // rounding is set as it is done during getMinimumRounding - () -> preparedRounding, - valuesSourceConfig, - fc -> FilterRewriteHelper.getAggregationBounds(context, fc.field()) + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + fastFilterContext.setAggregationType( + new FastFilterRewriteHelper.DateHistogramAggregationType( + valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null + ) ); - if (filterContext != null) { - fieldType = filterContext.fieldType; - filters = filterContext.filters; - } else { - fieldType = null; - filters = null; + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter( + context, + fc -> FastFilterRewriteHelper.getAggregationBounds(context, fc.getFieldType().name()), + b -> getMinimumRounding(b[0], b[1]), + // Passing prepared rounding as supplier to ensure the correct prepared + // rounding is set as it is done during getMinimumRounding + () -> preparedRounding + ); } } @@ -226,28 +226,21 @@ public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBuc return LeafBucketCollector.NO_OP_COLLECTOR; } + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(getBucketOrds().add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); + final SortedNumericDocValues values = valuesSource.longValues(ctx); final LeafBucketCollector iteratingCollector = getLeafCollector(values, sub); - - // Need to be 
declared as final and array for usage within the - // LeafBucketCollectorBase subclass below - final boolean[] useOpt = new boolean[1]; - useOpt[0] = filters != null; - return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - // Try fast filter aggregation if the filters have been created - // Skip if tried before and gave incorrect/incomplete results - if (useOpt[0]) { - useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> { - incrementBucketDocCount( - FilterRewriteHelper.getBucketOrd(getBucketOrds().add(owningBucketOrd, preparedRounding.round(key))), - count - ); - }); - } - iteratingCollector.collect(doc, owningBucketOrd); } }; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8437e1dce9fe0..b95bd093b82a6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,13 +33,12 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; import org.apache.lucene.util.CollectionUtil; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; -import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -49,8 +48,8 @@ import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; import org.opensearch.search.aggregations.bucket.BucketsAggregator; +import org.opensearch.search.aggregations.bucket.FastFilterRewriteHelper; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; -import org.opensearch.search.aggregations.support.FieldContext; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; @@ -81,9 +80,9 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg private final long minDocCount; private final LongBounds extendedBounds; private final LongBounds hardBounds; - private final Weight[] filters; private final LongKeyedBucketOrds bucketOrds; - private final DateFieldMapper.DateFieldType fieldType; + + private final FastFilterRewriteHelper.FastFilterContext fastFilterContext; DateHistogramAggregator( String name, @@ -116,26 +115,21 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), cardinality); - FilterRewriteHelper.FilterContext filterContext = FilterRewriteHelper.buildFastFilterContext( - parent, - subAggregators.length, - context, - x -> rounding, - () -> preparedRounding, - valuesSourceConfig, - this::computeBounds + fastFilterContext = new FastFilterRewriteHelper.FastFilterContext(); + fastFilterContext.setAggregationType( + new 
FastFilterRewriteHelper.DateHistogramAggregationType( + valuesSourceConfig.fieldType(), + valuesSourceConfig.missing() != null, + valuesSourceConfig.script() != null + ) ); - if (filterContext != null) { - fieldType = filterContext.fieldType; - filters = filterContext.filters; - } else { - filters = null; - fieldType = null; + if (fastFilterContext.isRewriteable(parent, subAggregators.length)) { + fastFilterContext.buildFastFilter(context, this::computeBounds, x -> rounding, () -> preparedRounding); } } - private long[] computeBounds(final FieldContext fieldContext) throws IOException { - final long[] bounds = FilterRewriteHelper.getAggregationBounds(context, fieldContext.field()); + private long[] computeBounds(final FastFilterRewriteHelper.DateHistogramAggregationType fieldContext) throws IOException { + final long[] bounds = FastFilterRewriteHelper.getAggregationBounds(context, fieldContext.getFieldType().name()); if (bounds != null) { // Update min/max limit if user specified any hard bounds if (hardBounds != null) { @@ -160,26 +154,20 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol return LeafBucketCollector.NO_OP_COLLECTOR; } - // Need to be declared as final and array for usage within the - // LeafBucketCollectorBase subclass below - final boolean[] useOpt = new boolean[1]; - useOpt[0] = filters != null; + boolean optimized = FastFilterRewriteHelper.tryFastFilterAggregation( + ctx, + fastFilterContext, + (key, count) -> incrementBucketDocCount( + FastFilterRewriteHelper.getBucketOrd(bucketOrds.add(0, preparedRounding.round(key))), + count + ) + ); + if (optimized) throw new CollectionTerminatedException(); SortedNumericDocValues values = valuesSource.longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { - // Try fast filter aggregation if the filters have been created - // Skip if tried before and gave incorrect/incomplete results - if (useOpt[0]) { - useOpt[0] = FilterRewriteHelper.tryFastFilterAggregation(ctx, filters, fieldType, (key, count) -> { - incrementBucketDocCount( - FilterRewriteHelper.getBucketOrd(bucketOrds.add(owningBucketOrd, preparedRounding.round(key))), - count - ); - }); - } - if (values.advanceExact(doc)) { int valuesCount = values.docValueCount(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java deleted file mode 100644 index 29cecd5b382cd..0000000000000 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/FilterRewriteHelper.java +++ /dev/null @@ -1,259 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.search.aggregations.bucket.histogram; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.PointValues; -import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.PointRangeQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Weight; -import org.apache.lucene.util.NumericUtils; -import org.opensearch.common.CheckedFunction; -import org.opensearch.common.Rounding; -import org.opensearch.common.lucene.search.function.FunctionScoreQuery; -import org.opensearch.index.mapper.DateFieldMapper; -import org.opensearch.index.query.DateRangeIncludingNowQuery; -import org.opensearch.search.aggregations.support.FieldContext; -import org.opensearch.search.aggregations.support.ValuesSourceConfig; -import org.opensearch.search.internal.SearchContext; - -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.OptionalLong; -import java.util.function.BiConsumer; -import java.util.function.Function; -import java.util.function.Supplier; - -/** - * Helpers functions to rewrite and optimize aggregations using - * range filter queries - * - * @opensearch.internal - */ -public class FilterRewriteHelper { - - static class FilterContext { - final DateFieldMapper.DateFieldType fieldType; - final Weight[] filters; - - public FilterContext(DateFieldMapper.DateFieldType fieldType, Weight[] filters) { - this.fieldType = fieldType; - this.filters = filters; - } - } - - private static final int MAX_NUM_FILTER_BUCKETS = 1024; - private static final Map, Function> queryWrappers; - - // Initialize the wrappers map for unwrapping the query - static { - queryWrappers = new HashMap<>(); - queryWrappers.put(ConstantScoreQuery.class, q -> ((ConstantScoreQuery) q).getQuery()); - queryWrappers.put(FunctionScoreQuery.class, q -> ((FunctionScoreQuery) q).getSubQuery()); - queryWrappers.put(DateRangeIncludingNowQuery.class, q -> ((DateRangeIncludingNowQuery) q).getQuery()); - queryWrappers.put(IndexOrDocValuesQuery.class, q -> ((IndexOrDocValuesQuery) q).getIndexQuery()); - } - - /** - * Recursively unwraps query into the concrete form - * for applying the optimization - */ - private static Query unwrapIntoConcreteQuery(Query query) { - while (queryWrappers.containsKey(query.getClass())) { - query = queryWrappers.get(query.getClass()).apply(query); - } - - return query; - } - - /** - * Finds the min and max bounds for segments within the passed search context - */ - private static long[] getIndexBoundsFromLeaves(final SearchContext context, final String fieldName) throws IOException { - final List leaves = context.searcher().getIndexReader().leaves(); - long min = Long.MAX_VALUE, max = Long.MIN_VALUE; - // Since the query does not specify bounds for aggregation, we can - // build the global min/max from local min/max within each segment - for (LeafReaderContext leaf : leaves) { - final PointValues values = leaf.reader().getPointValues(fieldName); - if (values != null) { - min = Math.min(min, NumericUtils.sortableBytesToLong(values.getMinPackedValue(), 0)); - max = Math.max(max, NumericUtils.sortableBytesToLong(values.getMaxPackedValue(), 0)); - } - } - - if (min == Long.MAX_VALUE || max == Long.MIN_VALUE) return null; - - return new long[] { 
min, max }; - } - - static long[] getAggregationBounds(final SearchContext context, final String fieldName) throws IOException { - final Query cq = unwrapIntoConcreteQuery(context.query()); - final long[] indexBounds = getIndexBoundsFromLeaves(context, fieldName); - if (cq instanceof PointRangeQuery) { - final PointRangeQuery prq = (PointRangeQuery) cq; - // Ensure that the query and aggregation are on the same field - if (prq.getField().equals(fieldName)) { - return new long[] { - // Minimum bound for aggregation is the max between query and global - Math.max(NumericUtils.sortableBytesToLong(prq.getLowerPoint(), 0), indexBounds[0]), - // Maximum bound for aggregation is the min between query and global - Math.min(NumericUtils.sortableBytesToLong(prq.getUpperPoint(), 0), indexBounds[1]) }; - } - } else if (cq instanceof MatchAllDocsQuery) { - return indexBounds; - } - - return null; - } - - /** - * Creates the range query filters for aggregations using the interval, min/max - * bounds and the rounding values - */ - private static Weight[] createFilterForAggregations( - final SearchContext context, - final Rounding rounding, - final Rounding.Prepared preparedRounding, - final String field, - final DateFieldMapper.DateFieldType fieldType, - final long low, - final long high - ) throws IOException { - final OptionalLong intervalOpt = Rounding.getInterval(rounding); - if (intervalOpt.isEmpty()) { - return null; - } - - final long interval = intervalOpt.getAsLong(); - // Calculate the number of buckets using range and interval - long roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); - long prevRounded = roundedLow; - int bucketCount = 0; - while (roundedLow <= fieldType.convertNanosToMillis(high)) { - bucketCount++; - // Below rounding is needed as the interval could return in - // non-rounded values for something like calendar month - roundedLow = preparedRounding.round(roundedLow + interval); - if (prevRounded == roundedLow) break; - prevRounded = roundedLow; - } - - Weight[] filters = null; - if (bucketCount > 0 && bucketCount <= MAX_NUM_FILTER_BUCKETS) { - int i = 0; - filters = new Weight[bucketCount]; - roundedLow = preparedRounding.round(fieldType.convertNanosToMillis(low)); - while (i < bucketCount) { - // Calculate the lower bucket bound - final byte[] lower = new byte[8]; - NumericUtils.longToSortableBytes(i == 0 ? low : fieldType.convertRoundedMillisToNanos(roundedLow), lower, 0); - // Calculate the upper bucket bound - final byte[] upper = new byte[8]; - roundedLow = preparedRounding.round(roundedLow + interval); - // Subtract -1 if the minimum is roundedLow as roundedLow itself - // is included in the next bucket - NumericUtils.longToSortableBytes( - i + 1 == bucketCount ? 
high : fieldType.convertRoundedMillisToNanos(roundedLow) - 1, - upper, - 0 - ); - filters[i++] = context.searcher().createWeight(new PointRangeQuery(field, lower, upper, 1) { - @Override - protected String toString(int dimension, byte[] value) { - return null; - } - }, ScoreMode.COMPLETE_NO_SCORES, 1); - } - } - - return filters; - } - - static FilterContext buildFastFilterContext( - final Object parent, - final int subAggLength, - SearchContext context, - Function<long[], Rounding> roundingFunction, - Supplier<Rounding.Prepared> preparedRoundingSupplier, - ValuesSourceConfig valuesSourceConfig, - CheckedFunction<FieldContext, long[], IOException> computeBounds - ) throws IOException { - // Create the filters for fast aggregation only if the query is instance - // of point range query and there aren't any parent/sub aggregations - if (parent == null && subAggLength == 0 && valuesSourceConfig.missing() == null && valuesSourceConfig.script() == null) { - final FieldContext fieldContext = valuesSourceConfig.fieldContext(); - if (fieldContext != null) { - final String fieldName = fieldContext.field(); - final long[] bounds = computeBounds.apply(fieldContext); - if (bounds != null) { - assert fieldContext.fieldType() instanceof DateFieldMapper.DateFieldType; - final DateFieldMapper.DateFieldType fieldType = (DateFieldMapper.DateFieldType) fieldContext.fieldType(); - final Rounding rounding = roundingFunction.apply(bounds); - final Weight[] filters = FilterRewriteHelper.createFilterForAggregations( - context, - rounding, - preparedRoundingSupplier.get(), - fieldName, - fieldType, - bounds[0], - bounds[1] - ); - return new FilterContext(fieldType, filters); - } - } - } - return null; - } - - static long getBucketOrd(long bucketOrd) { - if (bucketOrd < 0) { // already seen - bucketOrd = -1 - bucketOrd; - } - - return bucketOrd; - } - - static boolean tryFastFilterAggregation( - final LeafReaderContext ctx, - final Weight[] filters, - final DateFieldMapper.DateFieldType fieldType, - final BiConsumer<Long, Integer> incrementDocCount - ) throws IOException { - final int[] counts = new int[filters.length]; - int i; - for (i = 0; i < filters.length; i++) { - counts[i] = filters[i].count(ctx); - if (counts[i] == -1) { - // Cannot use the optimization if any of the counts - // is -1 indicating the segment might have deleted documents - return false; - } - } - - for (i = 0; i < filters.length; i++) { - if (counts[i] > 0) { - incrementDocCount.accept( - fieldType.convertNanosToMillis( - NumericUtils.sortableBytesToLong(((PointRangeQuery) filters[i].getQuery()).getLowerPoint(), 0) - ), - counts[i] - ); - } - } - throw new CollectionTerminatedException(); - } -} diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index eabc4b7764eed..b581e552fec4f 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -1279,7 +1279,7 @@ public void testWithDateHistogram() throws IOException { }, (result) -> { assertEquals(3, result.getBuckets().size()); - assertEquals("{date=1508457600000}", result.afterKey().toString()); + assertEquals("{date=1508457600000}", result.afterKey().toString()); // 2017-10-20T00:00:00 assertEquals("{date=1474329600000}", result.getBuckets().get(0).getKeyAsString()); assertEquals(2L, result.getBuckets().get(0).getDocCount());
assertEquals("{date=1508371200000}", result.getBuckets().get(1).getKeyAsString()); @@ -1300,9 +1300,8 @@ public void testWithDateHistogram() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date").field("date") .calendarInterval(DateHistogramInterval.days(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)).aggregateAfter( - createAfterKey("date", 1474329600000L) + createAfterKey("date", 1474329600000L) // 2016-09-20T00:00:00 ); - }, (result) -> { assertEquals(2, result.getBuckets().size()); @@ -2242,21 +2241,20 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( Function<Object, V> transformKey ) throws IOException { int numTerms = randomIntBetween(10, 500); - List<T> terms = new ArrayList<>(); + List<T> terms = new ArrayList<>(); // possible values for the terms for (int i = 0; i < numTerms; i++) { terms.add(randomSupplier.get()); } int numDocs = randomIntBetween(100, 200); List>> dataset = new ArrayList<>(); - - Set<T> valuesSet = new HashSet<>(); - Map, AtomicLong> expectedDocCounts = new HashMap<>(); + Set<T> valuesSet = new HashSet<>(); // how many different values + Map, AtomicLong> expectedDocCounts = new HashMap<>(); // how many docs for each value for (int i = 0; i < numDocs; i++) { int numValues = randomIntBetween(1, 5); Set values = new HashSet<>(); for (int j = 0; j < numValues; j++) { int rand = randomIntBetween(0, terms.size() - 1); - if (values.add(terms.get(rand))) { + if (values.add(terms.get(rand))) { // values are unique for one doc AtomicLong count = expectedDocCounts.computeIfAbsent(terms.get(rand), (k) -> new AtomicLong(0)); count.incrementAndGet(); valuesSet.add(terms.get(rand)); @@ -2264,9 +2262,8 @@ private <T extends Comparable<T>, V extends Comparable<T>> void testRandomTerms( } dataset.add(Collections.singletonMap(field, new ArrayList<>(values))); } - List<T> expected = new ArrayList<>(valuesSet); + List<T> expected = new ArrayList<>(valuesSet); // how many buckets expected Collections.sort(expected); - List> seen = new ArrayList<>(); AtomicBoolean finish = new AtomicBoolean(false); int size = randomIntBetween(1, expected.size()); From 309413f9078d8900e2779242a61febeba610ae4d Mon Sep 17 00:00:00 2001 From: Siddhant Deshmukh Date: Wed, 17 Jan 2024 16:07:27 -0800 Subject: [PATCH 80/81] Bump com.google.api:gax-httpjson from 0.103.1 to 2.39.0 in /plugins/repository-gcs (#11912) --------- Signed-off-by: dependabot[bot] Signed-off-by: Siddhant Deshmukh Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 | 1 - plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ea96035807276..9ff8dcf623681 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -172,6 +172,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `lycheeverse/lychee-action` from 1.8.0 to 1.9.1 ([#11795](https://github.com/opensearch-project/OpenSearch/pull/11795), [#11887](https://github.com/opensearch-project/OpenSearch/pull/11887)) - Bump `Lucene` from 9.8.0 to 9.9.1 ([#11421](https://github.com/opensearch-project/OpenSearch/pull/11421)) - Bump `com.networknt:json-schema-validator`
from 1.0.86 to 1.1.0 ([#11886](https://github.com/opensearch-project/OpenSearch/pull/11886)) +- Bump `com.google.api:gax-httpjson` from 0.103.1 to 2.39.0 ([#11794](https://github.com/opensearch-project/OpenSearch/pull/11794)) ### Changed - Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840)) diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 1bef5146f1db9..af6bf38cb065c 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -54,7 +54,7 @@ versions << [ dependencies { api 'com.google.api:api-common:1.8.1' api 'com.google.api:gax:2.35.0' - api 'com.google.api:gax-httpjson:0.103.1' + api 'com.google.api:gax-httpjson:2.39.0' api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 deleted file mode 100644 index 11315004e233d..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -041d99172fda933bc879bdfd8de9420c5c34107e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 new file mode 100644 index 0000000000000..d0f3b8dfa6d25 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-2.39.0.jar.sha1 @@ -0,0 +1 @@ +ebff086f21c54c1eb02056f94255b4f1836a8efe \ No newline at end of file From 904c9a99987c5bb2b2db02a9e2c911af51f23616 Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 18 Jan 2024 10:06:20 +0530 Subject: [PATCH 81/81] Fix flaky RemoteFSTranslogTests testMetadataFileDeletion test #9605 (#11899) Signed-off-by: Ashish Singh --- .../index/translog/RemoteFsTranslogTests.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 6bfab278993ed..a83e737dc25c1 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -86,7 +86,6 @@ import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; @@ -715,6 +714,7 @@ public void testSimpleOperationsUpload() throws Exception { translog.setMinSeqNoToKeep(0); // This should not trim anything from local translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(2, translog.readers.size()); assertBusy(() -> { assertEquals(4, translog.allUploaded().size()); @@ -728,6 +728,7 @@ public void testSimpleOperationsUpload() throws Exception { // This should not trim tlog-2.* files from remote as we not uploading any more translog to remote translog.setMinSeqNoToKeep(1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> { assertEquals(4, translog.allUploaded().size()); @@ -766,6 +767,7 @@ public void testMetadataFileDeletion() throws Exception { 
addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); translog.setMinSeqNoToKeep(i); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); } assertBusy(() -> assertEquals(4, translog.allUploaded().size())); @@ -776,6 +778,7 @@ public void testMetadataFileDeletion() throws Exception { addToTranslogAndListAndUpload(translog, ops, new Translog.Index(String.valueOf(i), i, primaryTerm.get(), new byte[] { 1 })); } translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1 + moreDocs, translog.readers.size()); assertBusy(() -> assertEquals(2 + 2L * moreDocs, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); @@ -783,6 +786,7 @@ public void testMetadataFileDeletion() throws Exception { int totalDocs = numDocs + moreDocs; translog.setMinSeqNoToKeep(totalDocs - 1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); addToTranslogAndListAndUpload( translog, @@ -791,6 +795,7 @@ public void testMetadataFileDeletion() throws Exception { ); translog.setMinSeqNoToKeep(totalDocs); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); // Change primary term and test the deletion of older primaries @@ -841,6 +846,7 @@ public void testDrainSync() throws Exception { translog.setMinSeqNoToKeep(0); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); // Case 1 - During ongoing uploads, the available permits are 0. @@ -869,6 +875,7 @@ public void testDrainSync() throws Exception { // Case 3 - After drainSync, if trimUnreferencedReaders is attempted, we do not delete from remote store. 
translog.setMinSeqNoToKeep(1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertEquals(6, translog.allUploaded().size()); assertEquals(mdFiles, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR))); @@ -892,6 +899,7 @@ public void testDrainSync() throws Exception { translog.setMinSeqNoToKeep(3); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> assertEquals(4, translog.allUploaded().size())); assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); @@ -1048,7 +1056,7 @@ public void testConcurrentWriteViewsAndSnapshot() throws Throwable { final int threadId = i; writers[i] = new Thread(new AbstractRunnable() { @Override - public void doRun() throws BrokenBarrierException, InterruptedException, IOException { + public void doRun() throws Exception { barrier.await(); int counter = 0; while (run.get() && idGenerator.get() < maxOps) { @@ -1090,6 +1098,7 @@ public void doRun() throws BrokenBarrierException, InterruptedException, IOExcep // deletionPolicy.setLocalCheckpointOfSafeCommit(localCheckpoint); translog.setMinSeqNoToKeep(localCheckpoint + 1); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); } } if (id % 7 == 0) {