diff --git a/.ci/bwcVersions b/.ci/bwcVersions index b6acb886dc327..52f1a492a3f74 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -53,4 +53,5 @@ BWC_VERSION: - "2.3.0" - "2.3.1" - "2.4.0" + - "2.4.1" - "2.5.0" diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index b0ea0f782cb62..5a75d2c877992 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -27,4 +27,3 @@ jobs: with: github_token: ${{ steps.github_app_token.outputs.token }} head_template: backport/backport-<%= number %>-to-<%= base %> - files_to_skip: 'CHANGELOG.md' diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index a630fc8d9fcc2..6023c875c6796 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -71,7 +71,7 @@ jobs: files: ./codeCoverage.xml - name: Create Comment Success - if: ${{ github.event_name == 'pull_request_target' && success() }} + if: ${{ github.event_name == 'pull_request_target' && success() && env.result == 'SUCCESS' }} uses: peter-evans/create-or-update-comment@v2 with: issue-number: ${{ env.pr_number }} @@ -81,6 +81,33 @@ jobs: * **URL:** ${{ env.workflow_url }} * **CommitID:** ${{ env.pr_from_sha }} + - name: Extract Test Failure + if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }} + run: | + TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10` + if [[ "$TEST_FAILURES" != "" ]] + then + echo "test_failures<<EOF" >> $GITHUB_ENV + echo "" >> $GITHUB_ENV + echo "* **TEST FAILURES:**" >> $GITHUB_ENV + echo '```' >> $GITHUB_ENV + echo "$TEST_FAILURES" >> $GITHUB_ENV + echo '```' >> $GITHUB_ENV + echo "EOF" >> $GITHUB_ENV + fi + + - name: Create Comment Flaky + if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }} + uses: peter-evans/create-or-update-comment@v2 + with: + issue-number: ${{ env.pr_number }} + body: | + ### Gradle Check (Jenkins) Run Completed with: + * **RESULT:** ${{ env.result }} :grey_exclamation: ${{ env.test_failures }} + * **URL:** ${{ env.workflow_url }} + * **CommitID:** ${{ env.pr_from_sha }} + Please review all [flaky tests](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) that succeeded after retry and create an issue if one does not already exist to track the flaky failure. + - name: Create Comment Failure if: ${{ github.event_name == 'pull_request_target' && failure() }} uses: peter-evans/create-or-update-comment@v2 @@ -88,8 +115,8 @@ jobs: issue-number: ${{ env.pr_number }} body: | ### Gradle Check (Jenkins) Run Completed with: - * **RESULT:** ${{ env.result }} :x: + * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }} * **URL:** ${{ env.workflow_url }} * **CommitID:** ${{ env.pr_from_sha }} - Please examine the workflow log, locate, and copy-paste the failure below, then iterate to green. + Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green. Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
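The `Extract Test Failure` step above relies on the GitHub Actions convention for writing a multiline value into `$GITHUB_ENV`: a `name<<DELIMITER` line (here `test_failures<<EOF`) opens the value, and a bare delimiter line closes it. A minimal sketch of that pattern in isolation — the `my_failures` name and the sample content are illustrative only, not part of this change:

```sh
# Open a multiline environment value; plain NAME=value lines cannot hold newlines.
echo "my_failures<<EOF" >> $GITHUB_ENV
# Every line echoed before the closing delimiter becomes part of the value.
echo "org.example.FirstTest.testSomething" >> $GITHUB_ENV
echo "org.example.SecondTest.testOther" >> $GITHUB_ENV
# Close the value with the bare delimiter on its own line.
echo "EOF" >> $GITHUB_ENV
```

A later step in the same job can then interpolate the whole block with `${{ env.my_failures }}`, which is how the comment bodies above splice `${{ env.test_failures }}` into the PR comment.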
diff --git a/CHANGELOG.md b/CHANGELOG.md index 61cc3ccae310c..00e854f22ecd4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) - Add project health badges to the README.md ([#4843](https://github.com/opensearch-project/OpenSearch/pull/4843)) - [Test] Add IAE test for deprecated edgeNGram analyzer name ([#5040](https://github.com/opensearch-project/OpenSearch/pull/5040)) +- Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) +- Add feature flag for extensions ([#5211](https://github.com/opensearch-project/OpenSearch/pull/5211)) - [Identity] Document identity roadmap and feature branch processes ([#4583](https://github.com/opensearch-project/OpenSearch/pull/4583)) - [Identity] Add stubs for AccessTokenManager ([#4612](https://github.com/opensearch-project/OpenSearch/pull/4612)) - [Identity] Permissions check API ([#4516](https://github.com/opensearch-project/OpenSearch/pull/4516)) @@ -21,7 +23,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Identity] Strategy for Delegated Authority using Tokens ([#4826](https://github.com/opensearch-project/OpenSearch/pull/4826)) - [Identity] User operations: create update delete ([#4741](https://github.com/opensearch-project/OpenSearch/pull/4741)) - ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 @@ -44,11 +45,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bumps `reactor-core` from 3.4.18 to 3.4.23 ([#4548](https://github.com/opensearch-project/OpenSearch/pull/4548)) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Update Apache Lucene to 9.5.0-snapshot-a4ef70f ([#4979](https://github.com/opensearch-project/OpenSearch/pull/4979)) -- Bumps `bcpg-fips` from 1.0.5.1 to 1.0.7.1 - Update to Gradle 7.6 and JDK-19 ([#4973](https://github.com/opensearch-project/OpenSearch/pull/4973)) ### Changed -<<<<<<< HEAD - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) @@ -56,8 +55,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Weighted round-robin scheduling policy for shard coordination traffic ([#4241](https://github.com/opensearch-project/OpenSearch/pull/4241)) - Add index specific setting for remote repository ([#4253](https://github.com/opensearch-project/OpenSearch/pull/4253)) - [Segment Replication] Update replicas to commit SegmentInfos instead of relying on SIS files from primary shards. 
([#4402](https://github.com/opensearch-project/OpenSearch/pull/4402)) -======= ->>>>>>> origin/main - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948)) - Relax visibility of the HTTP_CHANNEL_KEY and HTTP_SERVER_CHANNEL_KEY to make it possible for the plugins to access associated Netty4HttpChannel / Netty4HttpServerChannel instance ([#4638](https://github.com/opensearch-project/OpenSearch/pull/4638)) - Use ReplicationFailedException instead of OpensearchException in ReplicationTarget ([#4725](https://github.com/opensearch-project/OpenSearch/pull/4725)) @@ -81,7 +78,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) ### Fixed -<<<<<<< HEAD - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) @@ -129,11 +125,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) -======= ->>>>>>> origin/main - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - +- Add jvm option to allow security manager ([#5194](https://github.com/opensearch-project/OpenSearch/pull/5194)) ### Security <<<<<<< HEAD @@ -157,6 +151,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added ### Dependencies +<<<<<<< HEAD +>>>>>>> origin/main +======= +- Bumps `bcpg-fips` from 1.0.5.1 to 1.0.7.1 +- Bumps `azure-storage-blob` from 12.16.1 to 12.20.0 ([#4995](https://github.com/opensearch-project/OpenSearch/pull/4995)) +- Bumps `commons-compress` from 1.21 to 1.22 ([#5104](https://github.com/opensearch-project/OpenSearch/pull/5104)) +- Bump `opencensus-contrib-http-util` from 0.18.0 to 0.31.1 ([#3633](https://github.com/opensearch-project/OpenSearch/pull/3633)) +- Bump `geoip2` from 3.0.1 to 3.0.2 ([#5103](https://github.com/opensearch-project/OpenSearch/pull/5103)) >>>>>>> origin/main ### Changed ### Deprecated diff --git a/build.gradle b/build.gradle index 1fb6cc3849710..076894863b1bf 100644 --- a/build.gradle +++ b/build.gradle @@ -316,10 +316,7 @@ allprojects { javadoc.options.encoding = 'UTF8' javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') javadoc.options.tags = ["opensearch.internal", "opensearch.api", "opensearch.experimental"] - if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_19) { - javadoc.options.addBooleanOption("-enable-preview", true) - javadoc.options.addStringOption("-release", BuildParams.runtimeJavaVersion.majorVersion) - } + javadoc.options.addStringOption("-release", targetCompatibility.majorVersion) } // support for reproducible builds @@ -416,6 +413,9 @@ 
gradle.projectsEvaluated { if (BuildParams.runtimeJavaVersion > JavaVersion.VERSION_17) { task.jvmArgs += ["-Djava.security.manager=allow"] } + if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_19) { + task.jvmArgs += ["--enable-preview"] + } } } diff --git a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java b/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java deleted file mode 100644 index 40c4b60f6deb0..0000000000000 --- a/buildSrc/reaper/src/main/java/org/elasticsearch/gradle/reaper/package-info.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Licensed to OpenSearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. OpenSearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * SPDX-License-Identifier: Apache-2.0 - */ -/** - * Adding a sample package level javadoc to pass javadoc validation - * on reaper package. - * TODO - Need to add package description - */ -package org.elasticsearch.gradle.reaper; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java index ae7b0d938e8ef..87a565e6f4431 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java @@ -51,8 +51,7 @@ import java.util.Arrays; import java.util.Comparator; -import java.util.List; -import java.util.stream.Collectors; +import java.util.Objects; /** * A plugin to manage getting and extracting distributions of OpenSearch. 
@@ -71,12 +70,6 @@ public class DistributionDownloadPlugin implements Plugin<Project> { private static final String SNAPSHOT_REPO_NAME = "opensearch-snapshots"; public static final String DISTRO_EXTRACTED_CONFIG_PREFIX = "opensearch_distro_extracted_"; - // for downloading Elasticsearch OSS distributions to run BWC - private static final String FAKE_IVY_GROUP_ES = "elasticsearch-distribution"; - private static final String DOWNLOAD_REPO_NAME_ES = "elasticsearch-downloads"; - private static final String SNAPSHOT_REPO_NAME_ES = "elasticsearch-snapshots"; - private static final String FAKE_SNAPSHOT_IVY_GROUP_ES = "elasticsearch-distribution-snapshot"; - private static final String RELEASE_PATTERN_LAYOUT = "/core/opensearch/[revision]/[module]-min-[revision](-[classifier]).[ext]"; private static final String SNAPSHOT_PATTERN_LAYOUT = "/snapshots/core/opensearch/[revision]/[module]-min-[revision](-[classifier])-latest.[ext]"; @@ -159,35 +152,20 @@ private DistributionDependency resolveDependencyNotation(Project p, OpenSearchDi return distributionsResolutionStrategiesContainer.stream() .sorted(Comparator.comparingInt(DistributionResolution::getPriority)) .map(r -> r.getResolver().resolve(p, distribution)) - .filter(d -> d != null) + .filter(Objects::nonNull) .findFirst() .orElseGet(() -> DistributionDependency.of(dependencyNotation(distribution))); } private static void addIvyRepo(Project project, String name, String url, String group, String... patternLayout) { - final List<IvyArtifactRepository> repos = Arrays.stream(patternLayout).map(pattern -> project.getRepositories().ivy(repo -> { - repo.setName(name); - repo.setUrl(url); - repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - repo.patternLayout(layout -> layout.artifact(pattern)); - })).collect(Collectors.toList()); - project.getRepositories().exclusiveContent(exclusiveContentRepository -> { exclusiveContentRepository.filter(config -> config.includeGroup(group)); - exclusiveContentRepository.forRepositories(repos.toArray(new IvyArtifactRepository[repos.size()])); - }); - } - - private static void addIvyRepo2(Project project, String name, String url, String group) { - IvyArtifactRepository ivyRepo = project.getRepositories().ivy(repo -> { - repo.setName(name); - repo.setUrl(url); - repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); - repo.patternLayout(layout -> layout.artifact("/downloads/elasticsearch/elasticsearch-oss-[revision](-[classifier]).[ext]")); - }); - project.getRepositories().exclusiveContent(exclusiveContentRepository -> { - exclusiveContentRepository.filter(config -> config.includeGroup(group)); - exclusiveContentRepository.forRepositories(ivyRepo); + exclusiveContentRepository.forRepositories(Arrays.stream(patternLayout).map(pattern -> project.getRepositories().ivy(repo -> { + repo.setName(name); + repo.setUrl(url); + repo.metadataSources(IvyArtifactRepository.MetadataSources::artifact); + repo.patternLayout(layout -> layout.artifact(pattern)); + })).toArray(IvyArtifactRepository[]::new)); }); } @@ -211,9 +189,6 @@ private static void setupDownloadServiceRepo(Project project) { ); addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://artifacts.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, SNAPSHOT_PATTERN_LAYOUT); } - - addIvyRepo2(project, DOWNLOAD_REPO_NAME_ES, "https://artifacts-no-kpi.elastic.co", FAKE_IVY_GROUP_ES); - addIvyRepo2(project, SNAPSHOT_REPO_NAME_ES, "https://snapshots-no-kpi.elastic.co", FAKE_SNAPSHOT_IVY_GROUP_ES); } /** @@ -222,16 +197,12 @@ private static void setupDownloadServiceRepo(Project project) {
* The returned object is suitable to be passed to {@link DependencyHandler}. * The concrete type of the object will be a set of maven coordinates as a {@link String}. * Maven coordinates point to either the integ-test-zip coordinates on maven central, or a set of artificial - * coordinates that resolve to the Elastic download service through an ivy repository. + * coordinates that resolve to the OpenSearch download service through an ivy repository. */ private String dependencyNotation(OpenSearchDistribution distribution) { Version distroVersion = Version.fromString(distribution.getVersion()); if (distribution.getType() == Type.INTEG_TEST_ZIP) { - if (distroVersion.onOrAfter("1.0.0")) { - return "org.opensearch.distribution.integ-test-zip:opensearch:" + distribution.getVersion() + "@zip"; - } else { - return "org.elasticsearch.distribution.integ-test-zip:elasticsearch:" + distribution.getVersion() + "@zip"; - } + return "org.opensearch.distribution.integ-test-zip:opensearch:" + distribution.getVersion() + "@zip"; } String extension = distribution.getType().toString(); @@ -239,42 +210,24 @@ private String dependencyNotation(OpenSearchDistribution distribution) { if (distribution.getType() == Type.ARCHIVE) { extension = distribution.getPlatform() == Platform.WINDOWS ? "zip" : "tar.gz"; - if (distroVersion.onOrAfter("1.0.0")) { - switch (distribution.getArchitecture()) { - case ARM64: - classifier = ":" + distribution.getPlatform() + "-arm64"; - break; - case X64: - classifier = ":" + distribution.getPlatform() + "-x64"; - break; - case S390X: - classifier = ":" + distribution.getPlatform() + "-s390x"; - break; - default: - throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture()); - } - } else if (distroVersion.onOrAfter("7.0.0")) { - classifier = ":" + distribution.getPlatform() + "-x86_64"; - } else { - classifier = ""; + switch (distribution.getArchitecture()) { + case ARM64: + classifier = ":" + distribution.getPlatform() + "-arm64"; + break; + case X64: + classifier = ":" + distribution.getPlatform() + "-x64"; + break; + case S390X: + classifier = ":" + distribution.getPlatform() + "-s390x"; + break; + default: + throw new IllegalArgumentException("Unsupported architecture: " + distribution.getArchitecture()); } } else if (distribution.getType() == Type.DEB) { - if (distroVersion.onOrAfter("7.0.0")) { - classifier = ":amd64"; - } else { - classifier = ""; - } - } else if (distribution.getType() == Type.RPM && distroVersion.before("7.0.0")) { - classifier = ""; + classifier = ":amd64"; } - String group; - if (distroVersion.onOrAfter("1.0.0")) { - group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; - return group + ":opensearch" + ":" + distribution.getVersion() + classifier + "@" + extension; - } else { - group = distribution.getVersion().endsWith("-SNAPSHOT") ? FAKE_SNAPSHOT_IVY_GROUP_ES : FAKE_IVY_GROUP_ES; - return group + ":elasticsearch-oss" + ":" + distribution.getVersion() + classifier + "@" + extension; - } + String group = distribution.getVersion().endsWith("-SNAPSHOT") ? 
FAKE_SNAPSHOT_IVY_GROUP : FAKE_IVY_GROUP; + return group + ":opensearch" + ":" + distribution.getVersion() + classifier + "@" + extension; } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java index 1073ba01dafab..08b7054d7d53a 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/Jdk.java @@ -37,6 +37,7 @@ import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.Property; import org.gradle.api.tasks.TaskDependency; +import org.gradle.internal.os.OperatingSystem; import java.io.File; import java.util.Arrays; @@ -169,7 +170,7 @@ public Object getBinJavaPath() { return new Object() { @Override public String toString() { - return getHomeRoot() + "/bin/java"; + return OperatingSystem.current().getExecutableName(getHomeRoot() + "/bin/java"); } }; } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java index 9d6e78014916d..cdf22407f6076 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/OpenSearchTestBasePlugin.java @@ -109,7 +109,12 @@ public void execute(Task t) { test.systemProperty("java.locale.providers", "SPI,JRE"); } else { test.systemProperty("java.locale.providers", "SPI,COMPAT"); - test.jvmArgs("--illegal-access=warn"); + if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) < 0) { + test.jvmArgs("--illegal-access=warn"); + } + } + if (test.getJavaVersion().compareTo(JavaVersion.VERSION_17) > 0) { + test.jvmArgs("-Djava.security.manager=allow"); + } } }); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java index 0f5348d5a8dcf..86823b82a379f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchCluster.java @@ -32,7 +32,6 @@ package org.opensearch.gradle.testclusters; import org.opensearch.gradle.FileSupplier; -import org.opensearch.gradle.Jdk; import org.opensearch.gradle.PropertyNormalization; import org.opensearch.gradle.ReaperService; import org.opensearch.gradle.http.WaitForHttpResource; @@ -75,7 +74,6 @@ public class OpenSearchCluster implements TestClusterConfiguration, Named { private final String path; private final String clusterName; private final NamedDomainObjectContainer<OpenSearchNode> nodes; - private final Jdk bwcJdk; private final File workingDirBase; private final LinkedHashMap<String, Predicate<TestClusterConfiguration>> waitConditions = new LinkedHashMap<>(); private final Project project; @@ -92,8 +90,7 @@ public OpenSearchCluster( ReaperService reaper, File workingDirBase, FileSystemOperations fileSystemOperations, - ArchiveOperations archiveOperations, - Jdk bwcJdk + ArchiveOperations archiveOperations ) { this.path = project.getPath(); this.clusterName = clusterName; @@ -103,7 +100,6 @@ public OpenSearchCluster( this.archiveOperations = archiveOperations; this.workingDirBase = workingDirBase; this.nodes = project.container(OpenSearchNode.class); - this.bwcJdk = bwcJdk; // Always add the first node String zone = hasZoneProperty() ?
"zone-1" : ""; @@ -167,7 +163,6 @@ private void addNode(String nodeName, String zoneName) { fileSystemOperations, archiveOperations, workingDirBase, - bwcJdk, zoneName ); // configure the cluster name eagerly diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index ab765efde7885..bcf9a8ba4d780 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -37,7 +37,6 @@ import org.opensearch.gradle.DistributionDownloadPlugin; import org.opensearch.gradle.OpenSearchDistribution; import org.opensearch.gradle.FileSupplier; -import org.opensearch.gradle.Jdk; import org.opensearch.gradle.LazyPropertyList; import org.opensearch.gradle.LazyPropertyMap; import org.opensearch.gradle.LoggedExec; @@ -132,7 +131,6 @@ public class OpenSearchNode implements TestClusterConfiguration { private final String name; private final Project project; private final ReaperService reaper; - private final Jdk bwcJdk; private final FileSystemOperations fileSystemOperations; private final ArchiveOperations archiveOperations; @@ -163,7 +161,7 @@ public class OpenSearchNode implements TestClusterConfiguration { private int currentDistro = 0; private TestDistribution testDistribution; - private List distributions = new ArrayList<>(); + private final List distributions = new ArrayList<>(); private volatile Process opensearchProcess; private Function nameCustomization = Function.identity(); private boolean isWorkingDirConfigured = false; @@ -172,11 +170,12 @@ public class OpenSearchNode implements TestClusterConfiguration { private Path confPathData; private String keystorePassword = ""; private boolean preserveDataDir = false; - private final Config opensearchConfig; - private final Config legacyESConfig; - private Config currentConfig; - private String zone; + private final Path configFile; + private final Path stdoutFile; + private final Path stderrFile; + private final Path stdinFile; + private final String zone; OpenSearchNode( String path, @@ -186,7 +185,6 @@ public class OpenSearchNode implements TestClusterConfiguration { FileSystemOperations fileSystemOperations, ArchiveOperations archiveOperations, File workingDirBase, - Jdk bwcJdk, String zone ) { this.path = path; @@ -195,7 +193,6 @@ public class OpenSearchNode implements TestClusterConfiguration { this.reaper = reaper; this.fileSystemOperations = fileSystemOperations; this.archiveOperations = archiveOperations; - this.bwcJdk = bwcJdk; workingDir = workingDirBase.toPath().resolve(safeName(name)).toAbsolutePath(); confPathRepo = workingDir.resolve("repo"); confPathData = workingDir.resolve("data"); @@ -203,107 +200,16 @@ public class OpenSearchNode implements TestClusterConfiguration { transportPortFile = confPathLogs.resolve("transport.ports"); httpPortsFile = confPathLogs.resolve("http.ports"); tmpDir = workingDir.resolve("tmp"); + configFile = workingDir.resolve("config/opensearch.yml"); + stdoutFile = confPathLogs.resolve("opensearch.stdout.log"); + stderrFile = confPathLogs.resolve("opensearch.stderr.log"); + stdinFile = workingDir.resolve("opensearch.stdin"); waitConditions.put("ports files", this::checkPortsFilesExistWithDelay); setTestDistribution(TestDistribution.INTEG_TEST); setVersion(VersionProperties.getOpenSearch()); - opensearchConfig = Config.getOpenSearchConfig(workingDir); - legacyESConfig = 
Config.getLegacyESConfig(workingDir); - currentConfig = opensearchConfig; this.zone = zone; } - /* - * An object to contain the configuration needed to install - * either an OpenSearch or an elasticsearch distribution on - * this test node. - * - * This is added to be able to run BWC testing against a - * cluster running elasticsearch. - * - * legacyESConfig will be removed in a future release. - */ - private static class Config { - final String distroName; - final String command; - final String keystoreTool; - final String pluginTool; - final String envTempDir; - final String envJavaOpts; - final String envPathConf; - final Path configFile; - final Path stdoutFile; - final Path stderrFile; - final Path stdinFile; - - Config( - String distroName, - String command, - String keystoreTool, - String pluginTool, - String envTempDir, - String envJavaOpts, - String envPathConf, - Path configFile, - Path stdoutFile, - Path stderrFile, - Path stdinFile - ) { - this.distroName = distroName; - this.command = command; - this.keystoreTool = keystoreTool; - this.pluginTool = pluginTool; - this.envTempDir = envTempDir; - this.envJavaOpts = envJavaOpts; - this.envPathConf = envPathConf; - this.configFile = configFile; - this.stdoutFile = stdoutFile; - this.stderrFile = stderrFile; - this.stdinFile = stdinFile; - } - - static Config getOpenSearchConfig(Path workingDir) { - Path confPathLogs = workingDir.resolve("logs"); - return new Config( - "OpenSearch", - "opensearch", - "opensearch-keystore", - "opensearch-plugin", - "OPENSEARCH_TMPDIR", - "OPENSEARCH_JAVA_OPTS", - "OPENSEARCH_PATH_CONF", - workingDir.resolve("config/opensearch.yml"), - confPathLogs.resolve("opensearch.stdout.log"), - confPathLogs.resolve("opensearch.stderr.log"), - workingDir.resolve("opensearch.stdin") - ); - } - - static Config getLegacyESConfig(Path workingDir) { - Path confPathLogs = workingDir.resolve("logs"); - return new Config( - "Elasticsearch", - "elasticsearch", - "elasticsearch-keystore", - "elasticsearch-plugin", - "ES_TMPDIR", - "ES_JAVA_OPTS", - "ES_PATH_CONF", - workingDir.resolve("config/elasticsearch.yml"), - confPathLogs.resolve("es.stdout.log"), - confPathLogs.resolve("es.stderr.log"), - workingDir.resolve("es.stdin") - ); - } - } - - private void applyConfig() { - if (getVersion().onOrAfter("1.0.0")) { - currentConfig = opensearchConfig; - } else { - currentConfig = legacyESConfig; - } - } - @Input @Optional public String getName() { @@ -321,7 +227,6 @@ public void setVersion(String version) { checkFrozen(); distributions.clear(); doSetVersion(version); - applyConfig(); } @Override @@ -331,7 +236,6 @@ public void setVersions(List<String> versions) { for (String version : versions) { doSetVersion(version); } - applyConfig(); } private void doSetVersion(String version) { @@ -528,7 +432,7 @@ public void jvmArgs(String...
values) { @Internal public Path getConfigDir() { - return currentConfig.configFile.getParent(); + return configFile.getParent(); } @Override @@ -555,7 +459,7 @@ public void freeze() { * @return stream of log lines */ public Stream<String> logLines() throws IOException { - return Files.lines(currentConfig.stdoutFile, StandardCharsets.UTF_8); + return Files.lines(stdoutFile, StandardCharsets.UTF_8); } @Override @@ -601,23 +505,17 @@ public synchronized void start() { } if (pluginsToInstall.isEmpty() == false) { - if (getVersion().onOrAfter("7.6.0")) { - logToProcessStdout("installing " + pluginsToInstall.size() + " plugins in a single transaction"); - final String[] arguments = Stream.concat(Stream.of("install", "--batch"), pluginsToInstall.stream()).toArray(String[]::new); - runOpenSearchBinScript(currentConfig.pluginTool, arguments); - logToProcessStdout("installed plugins"); - } else { - logToProcessStdout("installing " + pluginsToInstall.size() + " plugins sequentially"); - pluginsToInstall.forEach(plugin -> runOpenSearchBinScript(currentConfig.pluginTool, "install", "--batch", plugin)); - logToProcessStdout("installed plugins"); - } + logToProcessStdout("installing " + pluginsToInstall.size() + " plugins in a single transaction"); + final String[] arguments = Stream.concat(Stream.of("install", "--batch"), pluginsToInstall.stream()).toArray(String[]::new); + runOpenSearchBinScript("opensearch-plugin", arguments); + logToProcessStdout("installed plugins"); } - logToProcessStdout("Creating " + currentConfig.command + " keystore with password set to [" + keystorePassword + "]"); + logToProcessStdout("Creating opensearch keystore with password set to [" + keystorePassword + "]"); if (keystorePassword.length() > 0) { - runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, currentConfig.keystoreTool, "create", "-p"); + runOpenSearchBinScriptWithInput(keystorePassword + "\n" + keystorePassword, "opensearch-keystore", "create", "-p"); } else { - runOpenSearchBinScript(currentConfig.keystoreTool, "-v", "create"); + runOpenSearchBinScript("opensearch-keystore", "-v", "create"); } if (keystoreSettings.isEmpty() == false || keystoreFiles.isEmpty() == false) { @@ -645,7 +543,7 @@ public synchronized void start() { } } - logToProcessStdout("Starting " + currentConfig.distroName + " process"); + logToProcessStdout("Starting OpenSearch process"); startOpenSearchProcess(); } @@ -657,11 +555,11 @@ private boolean canUseSharedDistribution() { private void logToProcessStdout(String message) { try { - if (Files.exists(currentConfig.stdoutFile.getParent()) == false) { - Files.createDirectories(currentConfig.stdoutFile.getParent()); + if (Files.exists(stdoutFile.getParent()) == false) { + Files.createDirectories(stdoutFile.getParent()); } Files.write( - currentConfig.stdoutFile, + stdoutFile, ("[" + Instant.now().toString() + "] [BUILD] " + message + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.APPEND @@ -684,7 +582,6 @@ void goToNextVersion() { } logToProcessStdout("Switch version from " + getVersion() + " to " + distributions.get(currentDistro + 1).getVersion()); currentDistro += 1; - applyConfig(); setting("node.attr.upgraded", "true"); } @@ -696,7 +593,7 @@ private void copyExtraConfigFiles() { if (Files.exists(from.toPath()) == false) { throw new TestClustersException("Can't create extra config file from " + from + " for " + this + " as it does not exist"); } - Path dst = currentConfig.configFile.getParent().resolve(destination); + Path dst =
configFile.getParent().resolve(destination); try { Files.createDirectories(dst.getParent()); Files.copy(from.toPath(), dst, StandardCopyOption.REPLACE_EXISTING); @@ -721,7 +618,7 @@ private void copyExtraJars() { Files.copy(from.toPath(), destination, StandardCopyOption.REPLACE_EXISTING); LOGGER.info("Added extra jar {} to {}", from.getName(), destination); } catch (IOException e) { - throw new UncheckedIOException("Can't copy extra jar dependency " + from.getName() + " to " + destination.toString(), e); + throw new UncheckedIOException("Can't copy extra jar dependency " + from.getName() + " to " + destination, e); } }); } @@ -794,9 +691,7 @@ private void runOpenSearchBinScriptWithInput(String input, String tool, CharSequ ArrayList<CharSequence> result = new ArrayList<>(); result.add("/c"); result.add("bin\\" + tool + ".bat"); - for (CharSequence arg : args) { - result.add(arg); - } + result.addAll(Arrays.asList(args)); return result; }).onUnix(() -> Arrays.asList(args)).supply()); spec.setStandardInput(byteArrayInputStream); @@ -809,7 +704,7 @@ private void runOpenSearchBinScriptWithInput(String input, String tool, CharSequ private void runKeystoreCommandWithPassword(String keystorePassword, String input, CharSequence... args) { final String actualInput = keystorePassword.length() > 0 ? keystorePassword + "\n" + input : input; - runOpenSearchBinScriptWithInput(actualInput, currentConfig.keystoreTool, args); + runOpenSearchBinScriptWithInput(actualInput, "opensearch-keystore", args); } private void runOpenSearchBinScript(String tool, CharSequence... args) { @@ -819,7 +714,7 @@ private void runOpenSearchBinScript(String tool, CharSequence... args) { private Map<String, String> getOpenSearchEnvironment() { Map<String, String> defaultEnv = new HashMap<>(); getRequiredJavaHome().ifPresent(javaHome -> defaultEnv.put("JAVA_HOME", javaHome)); - defaultEnv.put(currentConfig.envPathConf, currentConfig.configFile.getParent().toString()); + defaultEnv.put("OPENSEARCH_PATH_CONF", configFile.getParent().toString()); String systemPropertiesString = ""; if (systemProperties.isEmpty() == false) { systemPropertiesString = " " @@ -829,7 +724,7 @@ private Map<String, String> getOpenSearchEnvironment() { // OPENSEARCH_PATH_CONF is also set as an environment variable and for a reference to ${OPENSEARCH_PATH_CONF} // to work OPENSEARCH_JAVA_OPTS, we need to make sure that OPENSEARCH_PATH_CONF before OPENSEARCH_JAVA_OPTS.
Instead, // we replace the reference with the actual value in other environment variables - .map(p -> p.replace("${" + currentConfig.envPathConf + "}", currentConfig.configFile.getParent().toString())) + .map(p -> p.replace("${OPENSEARCH_PATH_CONF}", configFile.getParent().toString())) .collect(Collectors.joining(" ")); } String jvmArgsString = ""; @@ -844,12 +739,12 @@ private Map<String, String> getOpenSearchEnvironment() { } String heapSize = System.getProperty("tests.heap.size", "512m"); defaultEnv.put( - currentConfig.envJavaOpts, + "OPENSEARCH_JAVA_OPTS", "-Xms" + heapSize + " -Xmx" + heapSize + " -ea -esa " + systemPropertiesString + " " + jvmArgsString + " " + // Support passing in additional JVM arguments System.getProperty("tests.jvm.argline", "") ); - defaultEnv.put(currentConfig.envTempDir, tmpDir.toString()); + defaultEnv.put("OPENSEARCH_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite OPENSEARCH_TMPDIR defaultEnv.put("TMP", tmpDir.toString()); @@ -868,27 +763,20 @@ private Map<String, String> getOpenSearchEnvironment() { } private java.util.Optional<String> getRequiredJavaHome() { - // If we are testing the current version of Elasticsearch, use the configured runtime Java + // If we are testing the current version of OpenSearch, use the configured runtime Java if (getTestDistribution() == TestDistribution.INTEG_TEST || getVersion().equals(VersionProperties.getOpenSearchVersion())) { return java.util.Optional.of(BuildParams.getRuntimeJavaHome()).map(File::getAbsolutePath); - } else if (getVersion().before("7.0.0")) { - return java.util.Optional.of(bwcJdk.getJavaHomePath().toString()); } else { // otherwise use the bundled JDK return java.util.Optional.empty(); } } - @Internal - Jdk getBwcJdk() { - return getVersion().before("7.0.0") ?
bwcJdk : null; - } - private void startOpenSearchProcess() { final ProcessBuilder processBuilder = new ProcessBuilder(); Path effectiveDistroDir = getDistroDir(); List<String> command = OS.<List<String>>conditional() - .onUnix(() -> Arrays.asList(effectiveDistroDir.resolve("./bin/" + currentConfig.command).toString())) - .onWindows(() -> Arrays.asList("cmd", "/c", effectiveDistroDir.resolve("bin\\" + currentConfig.command + ".bat").toString())) + .onUnix(() -> List.of(effectiveDistroDir.resolve("./bin/opensearch").toString())) + .onWindows(() -> Arrays.asList("cmd", "/c", effectiveDistroDir.resolve("bin\\opensearch.bat").toString())) .supply(); processBuilder.command(command); processBuilder.directory(workingDir.toFile()); @@ -898,13 +786,13 @@ private void startOpenSearchProcess() { environment.putAll(getOpenSearchEnvironment()); // don't buffer all in memory, make sure we don't block on the default pipes - processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(currentConfig.stderrFile.toFile())); - processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(currentConfig.stdoutFile.toFile())); + processBuilder.redirectError(ProcessBuilder.Redirect.appendTo(stderrFile.toFile())); + processBuilder.redirectOutput(ProcessBuilder.Redirect.appendTo(stdoutFile.toFile())); if (keystorePassword != null && keystorePassword.length() > 0) { try { - Files.write(currentConfig.stdinFile, (keystorePassword + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); - processBuilder.redirectInput(currentConfig.stdinFile.toFile()); + Files.write(stdinFile, (keystorePassword + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE); + processBuilder.redirectInput(stdinFile.toFile()); } catch (IOException e) { throw new TestClustersException("Failed to set the keystore password for " + this, e); } @@ -913,7 +801,7 @@ private void startOpenSearchProcess() { try { opensearchProcess = processBuilder.start(); } catch (IOException e) { - throw new TestClustersException("Failed to start " + currentConfig.command + " process for " + this, e); + throw new TestClustersException("Failed to start opensearch process for " + this, e); } reaper.registerPid(toString(), opensearchProcess.pid()); } @@ -985,8 +873,8 @@ public synchronized void stop(boolean tailLogs) { stopProcess(opensearchProcess.toHandle(), true); reaper.unregister(toString()); if (tailLogs) { - logFileContents("Standard output of node", currentConfig.stdoutFile); - logFileContents("Standard error of node", currentConfig.stderrFile); + logFileContents("Standard output of node", stdoutFile); + logFileContents("Standard error of node", stderrFile); } opensearchProcess = null; // Clean up the ports file in case this is started again. @@ -1014,16 +902,13 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { return; } - // Stop all children last - if the ML processes are killed before the ES JVM then + // Stop all children last - if the ML processes are killed before the OpenSearch JVM then // they'll be recorded as having failed and won't restart when the cluster restarts. - // ES could actually be a child when there's some wrapper process like on Windows, + // OpenSearch could actually be a child when there's some wrapper process like on Windows, // and in that case the ML processes will be grandchildren of the wrapper. List<ProcessHandle> children = processHandle.children().collect(Collectors.toList()); try { - logProcessInfo( - "Terminating " + currentConfig.command + " process" + (forcibly ?
" forcibly " : "gracefully") + ":", - processHandle.info() - ); + logProcessInfo("Terminating opensearch process" + (forcibly ? " forcibly " : "gracefully") + ":", processHandle.info()); if (forcibly) { processHandle.destroyForcibly(); @@ -1043,7 +928,7 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate " + currentConfig.command + " process for " + this); + throw new TestClustersException("Was not able to terminate opensearch process for " + this); } } finally { children.forEach(each -> stopProcess(each, forcibly)); @@ -1051,7 +936,7 @@ private void stopProcess(ProcessHandle processHandle, boolean forcibly) { waitForProcessToExit(processHandle); if (processHandle.isAlive()) { - throw new TestClustersException("Was not able to terminate " + currentConfig.command + " process for " + this); + throw new TestClustersException("Was not able to terminate opensearch process for " + this); } } @@ -1135,7 +1020,7 @@ private void waitForProcessToExit(ProcessHandle processHandle) { try { processHandle.onExit().get(OPENSEARCH_DESTROY_TIMEOUT, OPENSEARCH_DESTROY_TIMEOUT_UNIT); } catch (InterruptedException e) { - LOGGER.info("Interrupted while waiting for {} process", currentConfig.command, e); + LOGGER.info("Interrupted while waiting for opensearch process", e); Thread.currentThread().interrupt(); } catch (ExecutionException e) { LOGGER.info("Failure while waiting for process to exist", e); @@ -1146,8 +1031,8 @@ private void waitForProcessToExit(ProcessHandle processHandle) { private void createWorkingDir() throws IOException { // Start configuration from scratch in case of a restart - fileSystemOperations.delete(d -> d.delete(currentConfig.configFile.getParent())); - Files.createDirectories(currentConfig.configFile.getParent()); + fileSystemOperations.delete(d -> d.delete(configFile.getParent())); + Files.createDirectories(configFile.getParent()); Files.createDirectories(confPathRepo); Files.createDirectories(confPathData); Files.createDirectories(confPathLogs); @@ -1250,42 +1135,27 @@ private void createConfiguration() { } baseConfig.put("node.portsfile", "true"); baseConfig.put("http.port", httpPort); - if (getVersion().onOrAfter(Version.fromString("6.7.0"))) { - baseConfig.put("transport.port", transportPort); - } else { - baseConfig.put("transport.tcp.port", transportPort); - } + baseConfig.put("transport.port", transportPort); // Default the watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space baseConfig.put("cluster.routing.allocation.disk.watermark.low", "1b"); baseConfig.put("cluster.routing.allocation.disk.watermark.high", "1b"); // increase script compilation limit since tests can rapid-fire script compilations - if (getVersion().onOrAfter(Version.fromString("7.9.0"))) { - baseConfig.put("script.disable_max_compilations_rate", "true"); - } else { - baseConfig.put("script.max_compilations_rate", "2048/1m"); - } + baseConfig.put("script.disable_max_compilations_rate", "true"); baseConfig.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); // Temporarily disable the real memory usage circuit breaker. It depends on real memory usage which we have no full control // over and the REST client will not retry on circuit breaking exceptions yet (see #31986 for details). Once the REST client // can retry on circuit breaking exceptions, we can revert again to the default configuration. 
- if (getVersion().onOrAfter("7.0.0")) { - baseConfig.put("indices.breaker.total.use_real_memory", "false"); - } + baseConfig.put("indices.breaker.total.use_real_memory", "false"); // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master baseConfig.put("discovery.initial_state_timeout", "0s"); // TODO: Remove these once https://github.com/elastic/elasticsearch/issues/46091 is fixed - if (getVersion().onOrAfter("1.0.0")) { - baseConfig.put("logger.org.opensearch.action.support.master", "DEBUG"); - baseConfig.put("logger.org.opensearch.cluster.coordination", "DEBUG"); - } else { - baseConfig.put("logger.org.elasticsearch.action.support.master", "DEBUG"); - baseConfig.put("logger.org.elasticsearch.cluster.coordination", "DEBUG"); - } + baseConfig.put("logger.org.opensearch.action.support.master", "DEBUG"); + baseConfig.put("logger.org.opensearch.cluster.coordination", "DEBUG"); HashSet<String> overriden = new HashSet<>(baseConfig.keySet()); overriden.retainAll(settings.keySet()); - overriden.removeAll(OVERRIDABLE_SETTINGS); + OVERRIDABLE_SETTINGS.forEach(overriden::remove); if (overriden.isEmpty() == false) { throw new IllegalArgumentException( "Testclusters does not allow the following settings to be changed:" + overriden + " for " + this @@ -1294,10 +1164,10 @@ private void createConfiguration() { // Make sure no duplicate config keys settings.keySet().stream().filter(OVERRIDABLE_SETTINGS::contains).forEach(baseConfig::remove); - final Path configFileRoot = currentConfig.configFile.getParent(); + final Path configFileRoot = configFile.getParent(); try { Files.write( - currentConfig.configFile, + configFile, Stream.concat(settings.entrySet().stream(), baseConfig.entrySet().stream()) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) @@ -1312,17 +1182,17 @@ private void createConfiguration() { } logToProcessStdout("Copying additional config files from distro " + configFiles); for (Path file : configFiles) { - Path dest = currentConfig.configFile.getParent().resolve(file.getFileName()); + Path dest = configFile.getParent().resolve(file.getFileName()); if (Files.exists(dest) == false) { Files.copy(file, dest); } } } catch (IOException e) { - throw new UncheckedIOException("Could not write config file: " + currentConfig.configFile, e); + throw new UncheckedIOException("Could not write config file: " + configFile, e); } tweakJvmOptions(configFileRoot); - LOGGER.info("Written config file:{} for {}", currentConfig.configFile, this); + LOGGER.info("Written config file:{} for {}", configFile, this); } private void tweakJvmOptions(Path configFileRoot) { @@ -1346,18 +1216,11 @@ private void tweakJvmOptions(Path configFileRoot) { private Map<String, String> jvmOptionExpansions() { Map<String, String> expansions = new HashMap<>(); Version version = getVersion(); - String heapDumpOrigin = getVersion().onOrAfter("6.3.0") ?
"-XX:HeapDumpPath=data" : "-XX:HeapDumpPath=/heap/dump/path"; + String heapDumpOrigin = "-XX:HeapDumpPath=data"; Path relativeLogPath = workingDir.relativize(confPathLogs); - expansions.put(heapDumpOrigin, "-XX:HeapDumpPath=" + relativeLogPath.toString()); - if (version.onOrAfter("6.2.0")) { - expansions.put("logs/gc.log", relativeLogPath.resolve("gc.log").toString()); - } - if (getVersion().onOrAfter("7.0.0")) { - expansions.put( - "-XX:ErrorFile=logs/hs_err_pid%p.log", - "-XX:ErrorFile=" + relativeLogPath.resolve("hs_err_pid%p.log").toString() - ); - } + expansions.put(heapDumpOrigin, "-XX:HeapDumpPath=" + relativeLogPath); + expansions.put("logs/gc.log", relativeLogPath.resolve("gc.log").toString()); + expansions.put("-XX:ErrorFile=logs/hs_err_pid%p.log", "-XX:ErrorFile=" + relativeLogPath.resolve("hs_err_pid%p.log")); return expansions; } @@ -1488,7 +1351,7 @@ void waitForAllConditions() { // Installing plugins at config time and loading them when nods start requires additional time we need to // account for ADDITIONAL_CONFIG_TIMEOUT_UNIT.toMillis( - ADDITIONAL_CONFIG_TIMEOUT * (plugins.size() + keystoreFiles.size() + keystoreSettings.size() + credentials.size()) + (long) ADDITIONAL_CONFIG_TIMEOUT * (plugins.size() + keystoreFiles.size() + keystoreSettings.size() + credentials.size()) ), TimeUnit.MILLISECONDS, this); } @@ -1546,17 +1409,17 @@ void setDataPath(Path dataPath) { @Internal Path getOpensearchStdoutFile() { - return currentConfig.stdoutFile; + return stdoutFile; } @Internal Path getOpensearchStderrFile() { - return currentConfig.stderrFile; + return stderrFile; } private static class FileEntry implements Named { - private String name; - private File file; + private final String name; + private final File file; FileEntry(String name, File file) { this.name = name; @@ -1577,8 +1440,8 @@ public File getFile() { } private static class CliEntry { - private String executable; - private CharSequence[] args; + private final String executable; + private final CharSequence[] args; CliEntry(String executable, CharSequence[] args) { this.executable = executable; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java index e5d264121b0aa..e5c413df00d0d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java @@ -31,7 +31,6 @@ package org.opensearch.gradle.testclusters; -import org.opensearch.gradle.Jdk; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.tasks.Nested; @@ -52,9 +51,6 @@ default void useCluster(OpenSearchCluster cluster) { // Add configured distributions as task dependencies so they are built before starting the cluster cluster.getNodes().stream().flatMap(node -> node.getDistributions().stream()).forEach(distro -> dependsOn(distro.getExtracted())); - // Add legacy BWC JDK runtime as a dependency so it's downloaded before starting the cluster if necessary - cluster.getNodes().stream().map(node -> (Callable) node::getBwcJdk).forEach(this::dependsOn); - cluster.getNodes().forEach(node -> dependsOn((Callable>) node::getPluginAndModuleConfigurations)); getClusters().add(cluster); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java index 2ef14a39b6669..8735970b0d65b 
100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersPlugin.java @@ -31,11 +31,8 @@ package org.opensearch.gradle.testclusters; -import org.opensearch.gradle.Architecture; import org.opensearch.gradle.DistributionDownloadPlugin; -import org.opensearch.gradle.Jdk; import org.opensearch.gradle.JdkDownloadPlugin; -import org.opensearch.gradle.OS; import org.opensearch.gradle.ReaperPlugin; import org.opensearch.gradle.ReaperService; import org.opensearch.gradle.info.BuildParams; @@ -68,8 +65,6 @@ public class TestClustersPlugin implements Plugin<Project> { private static final String LIST_TASK_NAME = "listTestClusters"; private static final String REGISTRY_SERVICE_NAME = "testClustersRegistry"; - private static final String LEGACY_JAVA_VENDOR = "adoptopenjdk"; - private static final String LEGACY_JAVA_VERSION = "8u242+b08"; private static final Logger logger = Logging.getLogger(TestClustersPlugin.class); @Inject @@ -95,16 +90,8 @@ public void apply(Project project) { ReaperService reaper = project.getRootProject().getExtensions().getByType(ReaperService.class); - // register legacy jdk distribution for testing pre-7.0 BWC clusters - Jdk bwcJdk = JdkDownloadPlugin.getContainer(project).create("bwc_jdk", jdk -> { - jdk.setVendor(LEGACY_JAVA_VENDOR); - jdk.setVersion(LEGACY_JAVA_VERSION); - jdk.setPlatform(OS.current().name().toLowerCase()); - jdk.setArchitecture(Architecture.current().name().toLowerCase()); - }); - // enable the DSL to describe clusters - NamedDomainObjectContainer<OpenSearchCluster> container = createTestClustersContainerExtension(project, reaper, bwcJdk); + NamedDomainObjectContainer<OpenSearchCluster> container = createTestClustersContainerExtension(project, reaper); // provide a task to be able to list defined clusters.
createListClustersTask(project, container); @@ -125,11 +112,7 @@ public void apply(Project project) { project.getRootProject().getPluginManager().apply(TestClustersHookPlugin.class); } - private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContainerExtension( - Project project, - ReaperService reaper, - Jdk bwcJdk - ) { + private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContainerExtension(Project project, ReaperService reaper) { // Create an extensions that allows describing clusters NamedDomainObjectContainer<OpenSearchCluster> container = project.container( OpenSearchCluster.class, @@ -139,8 +122,7 @@ private NamedDomainObjectContainer<OpenSearchCluster> createTestClustersContaine reaper, new File(project.getBuildDir(), "testclusters"), getFileSystemOperations(), - getArchiveOperations(), - bwcJdk + getArchiveOperations() ) ); project.getExtensions().add(EXTENSION_NAME, container); diff --git a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java index d7798ef5040bb..1a9647573f948 100644 --- a/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java +++ b/buildSrc/src/test/java/org/opensearch/gradle/DistributionDownloadPluginTests.java @@ -86,7 +86,7 @@ public void testCustomDistributionUrlWithUrl() { project.getExtensions().getExtraProperties().set("customDistributionUrl", customUrl); DistributionDownloadPlugin plugin = new DistributionDownloadPlugin(); plugin.apply(project); - assertEquals(4, project.getRepositories().size()); + assertEquals(2, project.getRepositories().size()); assertEquals( ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), customUrl ); @@ -95,22 +95,13 @@ public void testCustomDistributionUrlWithUrl() { ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), customUrl ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), - "https://artifacts-no-kpi.elastic.co" - ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), - "https://snapshots-no-kpi.elastic.co" - ); - } public void testCustomDistributionUrlWithoutUrl() { Project project = ProjectBuilder.builder().build(); DistributionDownloadPlugin plugin = new DistributionDownloadPlugin(); plugin.apply(project); - assertEquals(5, project.getRepositories().size()); + assertEquals(3, project.getRepositories().size()); assertEquals( ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-downloads")).getUrl().toString(), "https://artifacts.opensearch.org" ); @@ -123,14 +114,6 @@ public void testCustomDistributionUrlWithoutUrl() { ((DefaultIvyArtifactRepository) project.getRepositories().getAt("opensearch-snapshots")).getUrl().toString(), "https://artifacts.opensearch.org" ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-downloads")).getUrl().toString(), - "https://artifacts-no-kpi.elastic.co" - ); - assertEquals( - ((DefaultIvyArtifactRepository) project.getRepositories().getAt("elasticsearch-snapshots")).getUrl().toString(), - "https://snapshots-no-kpi.elastic.co" - ); } public void testBadVersionFormat() { @@ -332,7 +315,8 @@ private void checkBwc( Project archiveProject = ProjectBuilder.builder().withParent(bwcProject).withName(projectName).build();
archiveProject.getConfigurations().create(config); archiveProject.getArtifacts().add(config, new File("doesnotmatter")); - createDistro(project, "distro", version.toString(), type, platform, true); + final OpenSearchDistribution distro = createDistro(project, "distro", version.toString(), type, platform, true); + distro.setArchitecture(Architecture.current()); checkPlugin(project); } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java index 1d67a50f68f40..ca4446258446b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/core/CountResponse.java @@ -233,7 +233,7 @@ static ShardStats fromXContent(XContentParser parser) throws IOException { parser.skipChildren(); } } - return new ShardStats(successfulShards, totalShards, skippedShards, failures.toArray(new ShardSearchFailure[failures.size()])); + return new ShardStats(successfulShards, totalShards, skippedShards, failures.toArray(new ShardSearchFailure[0])); } @Override diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java index 5f43ec7f1d0fe..cd0eb8881ab0c 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java @@ -435,7 +435,7 @@ public PutIndexTemplateRequest alias(Alias alias) { @Override public String[] indices() { - return indexPatterns.toArray(new String[indexPatterns.size()]); + return indexPatterns.toArray(new String[0]); } @Override diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java index 5743820ff0175..1499b006da410 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java @@ -153,7 +153,7 @@ static CustomResponseSection2 fromXContent(XContentParser parser) throws IOExcep values.add(parser.text()); } assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); - CustomResponseSection2 responseSection2 = new CustomResponseSection2(values.toArray(new String[values.size()])); + CustomResponseSection2 responseSection2 = new CustomResponseSection2(values.toArray(new String[0])); assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return responseSection2; } diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index c758826b776ba..b062d937ed630 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -147,7 +147,7 @@ public HttpEntity getEntity() { * @return {@code true} if the input string matches the specification */ private static boolean matchWarningHeaderPatternByPrefix(final String s) { - return s.startsWith("299 OpenSearch-") || s.startsWith("299 Elasticsearch-"); + return s.startsWith("299 OpenSearch-"); } /** diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 
ef1035489c9fc..6cd5feadbef87 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -78,3 +78,6 @@ ${error.file} # Explicitly allow security manager (https://bugs.openjdk.java.net/browse/JDK-8270380) 18-:-Djava.security.manager=allow + +# Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4 (https://github.com/opensearch-project/OpenSearch/issues/4637) +19-:--enable-preview diff --git a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java index fc613ccdaae68..aa3dfbe39ee96 100644 --- a/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java +++ b/distribution/tools/launchers/src/main/java/org/opensearch/tools/launchers/SystemJvmOptions.java @@ -77,12 +77,21 @@ static List systemJvmOptions() { // log4j 2 "-Dlog4j.shutdownHookEnabled=false", "-Dlog4j2.disable.jmx=true", - + // security manager + allowSecurityManagerOption(), javaLocaleProviders() ) ).stream().filter(e -> e.isEmpty() == false).collect(Collectors.toList()); } + private static String allowSecurityManagerOption() { + if (Runtime.version().feature() > 17) { + return "-Djava.security.manager=allow"; + } else { + return ""; + } + } + private static String maybeShowCodeDetailsInExceptionMessages() { if (Runtime.version().feature() >= 14) { return "-XX:+ShowCodeDetailsInExceptionMessages"; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index c2db39ecea072..5bf0bc7763ddd 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -1028,7 +1028,7 @@ private static void setFileAttributes(final Path path, final Set if (BuildParams.getIsRuntimeJavaHomeSet()) { - test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + if (OS.current() == OS.WINDOWS) { + test.executable = "${BuildParams.runtimeJavaHome}/bin/java.exe" + } else { + test.executable = "${BuildParams.runtimeJavaHome}/bin/java" + } } } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 57dec44f2a013..ea7765b9bc58d 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-rc-2-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-rc-3-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=be5387477162265eac882b9c83d756d8d2db170380e36fba2fdbee83d87de0d7 +distributionSha256Sum=abc6de2653ec9befb00cc0d064ce1ca4e4dab2c91955e830661505189c0b2f08 diff --git a/libs/x-content/src/main/java/org/opensearch/common/ParseField.java b/libs/x-content/src/main/java/org/opensearch/common/ParseField.java index 8673e25bf567b..8f97fd923b560 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/ParseField.java +++ b/libs/x-content/src/main/java/org/opensearch/common/ParseField.java @@ -68,12 +68,12 @@ public ParseField(String name, String... 
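
Two related changes above gate JVM flags on the runtime feature version: `jvm.options` gains `19-:--enable-preview`, where the `19-:` range prefix applies the flag on JDK 19 and later only, and `SystemJvmOptions` adds `allowSecurityManagerOption()`, which returns `-Djava.security.manager=allow` on JDK 18+ and an empty string otherwise, relying on the surrounding stream to filter empties out. A condensed sketch of that launcher-side pattern; the real option list is much longer than shown here:

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class VersionGatedJvmOptions {
    // Return the flag on JDK 18+, or an empty placeholder that the
    // stream below filters out (mirrors SystemJvmOptions).
    static String allowSecurityManagerOption() {
        return Runtime.version().feature() > 17 ? "-Djava.security.manager=allow" : "";
    }

    static List<String> systemJvmOptions() {
        return Stream.of("-Dlog4j2.disable.jmx=true", allowSecurityManagerOption())
            .filter(option -> option.isEmpty() == false)
            .collect(Collectors.toList());
    }

    public static void main(String[] args) {
        System.out.println(systemJvmOptions());
    }
}
```
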
deprecatedNames) { } else { final HashSet<String> set = new HashSet<>(); Collections.addAll(set, deprecatedNames); - this.deprecatedNames = set.toArray(new String[set.size()]); + this.deprecatedNames = set.toArray(new String[0]); } Set<String> allNames = new HashSet<>(); allNames.add(name); Collections.addAll(allNames, this.deprecatedNames); - this.allNames = allNames.toArray(new String[allNames.size()]); + this.allNames = allNames.toArray(new String[0]); } /** diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPath.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPath.java index be7778097b45b..a11b13ec65946 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPath.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPath.java @@ -101,7 +101,7 @@ public static FilterPath[] compile(Set<String> filters) { } } } - return paths.toArray(new FilterPath[paths.size()]); + return paths.toArray(new FilterPath[0]); } private static FilterPath parse(final String filter, final String segment) { diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPathBasedFilter.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPathBasedFilter.java index 0463caaa93118..5e402cbd495ba 100644 --- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPathBasedFilter.java +++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/support/filtering/FilterPathBasedFilter.java @@ -95,7 +95,7 @@ private TokenFilter evaluate(String name, FilterPath[] filters) { } if ((nextFilters != null) && (nextFilters.isEmpty() == false)) { - return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[nextFilters.size()]), inclusive); + return new FilterPathBasedFilter(nextFilters.toArray(new FilterPath[0]), inclusive); } } return NO_MATCHING; diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 7dce788f3a4a4..8c6f279c445b3 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,7 +39,7 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:3.0.1') + api('com.maxmind.geoip2:geoip2:3.0.2') // geoip2 dependencies: api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") diff --git a/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 deleted file mode 100644 index f1d5ac5aea546..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-3.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8a814ae92a1d8c35f82d0ff76d86927c191b7916 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 new file mode 100644 index 0000000000000..2ff70cf499713 --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-3.0.2.jar.sha1 @@ -0,0 +1 @@ +f0ab0a451309c93f0fb6bf3cb203ba19d452c800 \ No newline at end of file diff --git a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java index d684f0bfebcfb..cacd7c3a23824 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java +++ 
b/modules/parent-join/src/internalClusterTest/java/org/opensearch/join/query/ChildQuerySearchIT.java @@ -310,7 +310,7 @@ public void testHasParentFilter() throws Exception { } assertThat(parentToChildren.get(previousParentId).add(childId), is(true)); } - indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()])); + indexRandom(true, builders.toArray(new IndexRequestBuilder[0])); assertThat(parentToChildren.isEmpty(), equalTo(false)); for (Map.Entry> parentToChildrenEntry : parentToChildren.entrySet()) { diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java index b2130eca3bb02..4f4665c434c67 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java @@ -546,7 +546,6 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { String name = this.name != null ? this.name : pft.name(); QueryShardContext percolateShardContext = wrap(context); PercolatorFieldMapper.configureContext(percolateShardContext, pft.mapUnmappedFieldsAsText); - ; PercolateQuery.QueryStore queryStore = createStore(pft.queryBuilderField, percolateShardContext); return pft.percolateQuery(name, queryStore, documents, docSearcher, excludeNestedDocuments, context.indexVersionCreated()); diff --git a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java index 8cfde2d2b412e..4fce04e23119c 100644 --- a/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/opensearch/index/rankeval/TransportRankEvalAction.java @@ -148,7 +148,7 @@ protected void doExecute(Task task, RankEvalRequest request, ActionListener indexingFailures, List search return; } RefreshRequest refresh = new RefreshRequest(); - refresh.indices(destinationIndices.toArray(new String[destinationIndices.size()])); + refresh.indices(destinationIndices.toArray(new String[0])); logger.debug("[{}]: refreshing", task.getId()); client.admin().indices().refresh(refresh, new ActionListener() { @Override diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java index baf3c83bd0050..6874f96628761 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/DeleteByQueryBasicTests.java @@ -362,7 +362,7 @@ public void testMultipleSources() throws Exception { int slices = randomSlices(1, 10); int expectedSlices = expectedSliceStatuses(slices, docs.keySet()); - String[] sourceIndexNames = docs.keySet().toArray(new String[docs.size()]); + String[] sourceIndexNames = docs.keySet().toArray(new String[0]); assertThat( deleteByQuery().source(sourceIndexNames).filter(QueryBuilders.matchAllQuery()).refresh(true).setSlices(slices).get(), diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java index 0c660e5df9682..24adba16d0bad 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java +++ 
b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexBasicTests.java @@ -161,7 +161,7 @@ public void testMultipleSources() throws Exception { int slices = randomSlices(1, 10); int expectedSlices = expectedSliceStatuses(slices, docs.keySet()); - String[] sourceIndexNames = docs.keySet().toArray(new String[docs.size()]); + String[] sourceIndexNames = docs.keySet().toArray(new String[0]); ReindexRequestBuilder request = reindex().source(sourceIndexNames).destination("dest").refresh(true).setSlices(slices); BulkByScrollResponse response = request.get(); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java index 4f48b99dccdd4..987fab954a8d0 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/UpdateByQueryBasicTests.java @@ -151,7 +151,7 @@ public void testMultipleSources() throws Exception { int slices = randomSlices(1, 10); int expectedSlices = expectedSliceStatuses(slices, docs.keySet()); - String[] sourceIndexNames = docs.keySet().toArray(new String[docs.size()]); + String[] sourceIndexNames = docs.keySet().toArray(new String[0]); BulkByScrollResponse response = updateByQuery().source(sourceIndexNames).refresh(true).setSlices(slices).get(); assertThat(response, matcher().updated(allDocs.size()).slices(hasSize(expectedSlices))); diff --git a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java index ac4def8ac9a11..2939711f6f7e1 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/opensearch/index/analysis/KuromojiTokenizerFactory.java @@ -79,6 +79,9 @@ public KuromojiTokenizerFactory(IndexSettings indexSettings, Environment env, St private static String parse(String rule, Set dup) { String[] values = CSVUtil.parse(rule); + if (values.length == 0) { + throw new IllegalArgumentException("Malformed csv in user dictionary."); + } if (dup.add(values[0]) == false) { throw new IllegalArgumentException("Found duplicate term [" + values[0] + "] in user dictionary."); } diff --git a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java index 23e6ef9fea059..03d9df6ebd6b2 100644 --- a/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java +++ b/plugins/analysis-kuromoji/src/test/java/org/opensearch/index/analysis/KuromojiAnalysisTests.java @@ -379,6 +379,15 @@ public void testKuromojiAnalyzerInvalidUserDictOption() throws Exception { ); } + public void testKuromojiAnalyzerEmptyDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "kuromoji") + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "\"") + .build(); + RuntimeException exc = expectThrows(RuntimeException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), equalTo("Line [1]: Malformed csv in user dictionary.")); + } + public void testKuromojiAnalyzerDuplicateUserDictRule() throws Exception { Settings settings = Settings.builder() 
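
The new guard in `KuromojiTokenizerFactory.parse` above rejects a user-dictionary rule whose CSV parse produces no values, so a rule consisting of a lone `"` now fails with a clear `IllegalArgumentException` instead of an `ArrayIndexOutOfBoundsException` on `values[0]`, as the new `testKuromojiAnalyzerEmptyDictRule` verifies. A self-contained sketch of the guard; `parseCsv` here is a stand-in for Lucene's `CSVUtil.parse`, which is assumed to return an empty array for malformed input such as an unbalanced quote:

```java
import java.util.HashSet;
import java.util.Set;

public class UserDictionaryGuard {
    // Stand-in for CSVUtil.parse: empty array signals malformed input.
    static String[] parseCsv(String rule) {
        return rule.equals("\"") ? new String[0] : rule.split(",");
    }

    static String parse(String rule, Set<String> dup) {
        String[] values = parseCsv(rule);
        if (values.length == 0) {
            throw new IllegalArgumentException("Malformed csv in user dictionary.");
        }
        if (dup.add(values[0]) == false) {
            throw new IllegalArgumentException("Found duplicate term [" + values[0] + "] in user dictionary.");
        }
        return rule;
    }

    public static void main(String[] args) {
        System.out.println(parse("sushi,sushi,noun", new HashSet<>())); // accepted
        try {
            parse("\"", new HashSet<>());
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // Malformed csv in user dictionary.
        }
    }
}
```
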
.put("index.analysis.analyzer.my_analyzer.type", "kuromoji") diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 7bf67769cda10..f42b44b56ccb8 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -84,7 +84,7 @@ dependencies { // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" // Apple iWork - api 'org.apache.commons:commons-compress:1.21' + api 'org.apache.commons:commons-compress:1.22' // Outlook documents api "org.apache.james:apache-mime4j-core:${versions.mime4j}" api "org.apache.james:apache-mime4j-dom:${versions.mime4j}" diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 deleted file mode 100644 index 81ac609a1aa26..0000000000000 --- a/plugins/ingest-attachment/licenses/commons-compress-1.21.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ec95b60d4e86b5c95a0e919cb172a0af98011ef \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 b/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 new file mode 100644 index 0000000000000..9ab7216c8050a --- /dev/null +++ b/plugins/ingest-attachment/licenses/commons-compress-1.22.jar.sha1 @@ -0,0 +1 @@ +691a8b4e6cf4248c3bc72c8b719337d5cb7359fa \ No newline at end of file diff --git a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java index 8a94fea0ebbd4..48a41dd4afaf3 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java +++ b/plugins/mapper-annotated-text/src/main/java/org/opensearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java @@ -237,7 +237,7 @@ public AnnotationToken[] getIntersectingAnnotations(int start, int end) { // add 1 for the fieldvalue separator character fieldValueOffset += fieldValueAnnotations.textMinusMarkup.length() + 1; } - return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]); + return intersectingAnnotations.toArray(new AnnotationToken[0]); } private void append(StringBuilder dest, String content, int start, int end) { diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 063851b3a7edf..3aa2bbb7dd2f6 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -54,7 +54,7 @@ dependencies { api "io.netty:netty-resolver-dns:${versions.netty}" api "io.netty:netty-transport-native-unix-common:${versions.netty}" implementation project(':modules:transport-netty4') - api 'com.azure:azure-storage-blob:12.16.1' + api 'com.azure:azure-storage-blob:12.20.0' api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.4.23' api 'io.projectreactor.netty:reactor-netty:1.0.18' diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.16.1.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-blob-12.16.1.jar.sha1 deleted file mode 100644 index 71014103e517b..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-blob-12.16.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84054ca8a6660eb77910925d71f70330fd3d83aa \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 
b/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 new file mode 100644 index 0000000000000..de86848c9fd06 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-blob-12.20.0.jar.sha1 @@ -0,0 +1 @@ +e682920b0e3115433f25d65b0718f8763035357e \ No newline at end of file diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 05e879547a4b0..9528537a3dd5e 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -81,7 +81,7 @@ dependencies { api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.18.0' - api 'io.opencensus:opencensus-contrib-http-util:0.18.0' + api 'io.opencensus:opencensus-contrib-http-util:0.31.1' api 'com.google.apis:google-api-services-storage:v1-rev20220608-1.32.1' testImplementation project(':test:fixtures:gcs-fixture') @@ -209,6 +209,8 @@ thirdPartyAudit { 'javax.jms.Message', 'javax.servlet.ServletContextEvent', 'javax.servlet.ServletContextListener', + 'io.opencensus.tags.TagMetadata', + 'io.opencensus.tags.TagMetadata$TagTtl', ) } diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 deleted file mode 100644 index 1757e00591110..0000000000000 --- a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76a37e4a931d5801a9e25b0c0353e5f37c4d1e8e \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 new file mode 100644 index 0000000000000..4e123da3ab45f --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.31.1.jar.sha1 @@ -0,0 +1 @@ +3c13fc5715231fadb16a9b74a44d9d59c460cfa8 \ No newline at end of file diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RefreshVersionInClusterStateIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RefreshVersionInClusterStateIT.java index b6945047300e0..56cb6ad5c31aa 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RefreshVersionInClusterStateIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RefreshVersionInClusterStateIT.java @@ -18,7 +18,7 @@ public class RefreshVersionInClusterStateIT extends AbstractRollingTestCase { /* - This test ensures that after the upgrade from ElasticSearch/ OpenSearch all nodes report the version on and after 1.0.0 + * This test ensures that after the upgrade, all nodes report the current version */ public void testRefresh() throws IOException { switch (CLUSTER_TYPE) { diff --git a/server/build.gradle b/server/build.gradle index dc1292cbf824d..3dc0f1d380647 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -164,22 +164,6 @@ tasks.named("internalClusterTest").configure { jvmArgs -= '-XX:TieredStopAtLevel=1' } -// Until this project is always being formatted with spotless, we need to -// guard against `spotless()` not existing. -try { - spotless { - java { - // Contains large data tables that do not format well. 
- targetExclude 'src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java' - } - } -} -catch (Exception e) { - if (e.getMessage().contains("Could not find method spotless") == false) { - throw e; - } -} - tasks.named("forbiddenPatterns").configure { exclude '**/*.json' exclude '**/*.jmx' diff --git a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java index 17366cf0d08fc..c2eb563145d43 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/IndicesRequestIT.java @@ -708,7 +708,7 @@ private String[] randomUniqueIndices() { while (uniqueIndices.size() < count) { uniqueIndices.add(randomFrom(this.indices)); } - return uniqueIndices.toArray(new String[uniqueIndices.size()]); + return uniqueIndices.toArray(new String[0]); } private static void assertAllRequestsHaveBeenConsumed() { diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java index ffc738ac98de5..483571f953b5e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/get/GetIndexIT.java @@ -163,7 +163,7 @@ public void testSimpleMixedFeatures() { } GetIndexResponse response = runWithRandomFeatureMethod( client().admin().indices().prepareGetIndex().addIndices("idx"), - features.toArray(new Feature[features.size()]) + features.toArray(new Feature[0]) ); String[] indices = response.indices(); assertThat(indices, notNullValue()); @@ -194,7 +194,7 @@ public void testEmptyMixedFeatures() { } GetIndexResponse response = runWithRandomFeatureMethod( client().admin().indices().prepareGetIndex().addIndices("empty_idx"), - features.toArray(new Feature[features.size()]) + features.toArray(new Feature[0]) ); String[] indices = response.indices(); assertThat(indices, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index 067b127a667b4..aa0f90bc4a6d9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -60,6 +60,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; import static org.opensearch.test.NodeRoles.onlyRole; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; @@ -236,8 +237,8 @@ public void testInvariantsAndLogsOnDecommissionedNodes() throws Exception { // Will wait for all events to complete client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - String currentClusterManager = internalCluster().getClusterManagerName(); String decommissionedNode = randomFrom(clusterManagerNodes.get(0), dataNodes.get(0)); + String activeNode = dataNodes.get(1); ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, decommissionedNode); 
DecommissionAttributeMetadata metadata = decommissionedNodeClusterService.state() @@ -277,7 +278,7 @@ public boolean innerMatch(LogEvent event) { ); TransportService clusterManagerTransportService = internalCluster().getInstance( TransportService.class, - internalCluster().getClusterManagerName() + internalCluster().getClusterManagerName(activeNode) ); MockTransportService decommissionedNodeTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, @@ -302,16 +303,54 @@ public boolean innerMatch(LogEvent event) { assertFalse(coordinator.localNodeCommissioned()); // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(currentClusterManager).execute( + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNode).execute( DeleteDecommissionStateAction.INSTANCE, new DeleteDecommissionStateRequest() ).get(); assertTrue(deleteDecommissionStateResponse.isAcknowledged()); - // Will wait for all events to complete - client(currentClusterManager).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + ClusterService activeNodeClusterService = internalCluster().getInstance(ClusterService.class, activeNode); + ClusterStateObserver clusterStateObserver = new ClusterStateObserver( + activeNodeClusterService, + null, + logger, + client(activeNode).threadPool().getThreadContext() + ); + CountDownLatch expectedStateLatch = new CountDownLatch(1); + Predicate expectedClusterStatePredicate = clusterState -> { + if (clusterState.metadata().decommissionAttributeMetadata() != null) return false; + if (clusterState.metadata().coordinationMetadata().getVotingConfigExclusions().isEmpty() == false) return false; + if (clusterState.nodes().getNodes().size() != 6) return false; + return clusterState.metadata().coordinationMetadata().getLastCommittedConfiguration().getNodeIds().size() == 3; + }; + + ClusterState currentState = activeNodeClusterService.state(); + if (expectedClusterStatePredicate.test(currentState)) { + logger.info("cluster restored"); + expectedStateLatch.countDown(); + } else { + clusterStateObserver.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + logger.info("cluster restored"); + expectedStateLatch.countDown(); + } + + @Override + public void onClusterServiceClose() { + throw new AssertionError("unexpected close"); + } + + @Override + public void onTimeout(TimeValue timeout) { + throw new AssertionError("unexpected timeout"); + } + }, expectedClusterStatePredicate); + } + // if the below condition is passed, then we are sure that config size is restored + assertTrue(expectedStateLatch.await(180, TimeUnit.SECONDS)); // will wait for cluster to stabilise with a timeout of 2 min as by then all nodes should have joined the cluster - ensureStableCluster(6, TimeValue.timeValueSeconds(121)); + ensureStableCluster(6); } private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterManagerDecommission) throws Exception { @@ -346,17 +385,27 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana clusterManagerNameToZone.put(clusterManagerNodes.get(2), "c"); logger.info("--> starting 4 data nodes each on zones 'a' & 'b' & 'c'"); - List nodes_in_zone_a = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + Map> 
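
The recommission wait above follows the usual check-then-observe shape: test the predicate against the current cluster state first, and only register a `ClusterStateObserver` listener when it does not hold yet, so a state published before the listener is installed cannot leave the latch hanging. A generic sketch of that shape, with an `AtomicReference` standing in for `ClusterService#state()` and a latch standing in for the observer callback; `awaitState` is an illustrative helper, not an OpenSearch API:

```java
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;

public class PredicateWait {
    static <S> boolean awaitState(AtomicReference<S> state, Predicate<S> expected, CountDownLatch nextChange, long seconds)
        throws InterruptedException {
        // Test the current state first: if it already matches, never block.
        if (expected.test(state.get())) {
            return true;
        }
        // Otherwise wait for the next published change and re-check.
        return nextChange.await(seconds, TimeUnit.SECONDS) && expected.test(state.get());
    }

    public static void main(String[] args) throws InterruptedException {
        AtomicReference<Integer> nodeCount = new AtomicReference<>(6);
        System.out.println(awaitState(nodeCount, n -> n == 6, new CountDownLatch(1), 1)); // true, no waiting
    }
}
```
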
zoneToNodesMap = new HashMap<>(); + zoneToNodesMap.put( + "a", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ) ); - List nodes_in_zone_b = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + zoneToNodesMap.put( + "b", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ) ); - List nodes_in_zone_c = internalCluster().startDataOnlyNodes( - dataNodeCountPerAZ, - Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + zoneToNodesMap.put( + "c", + internalCluster().startDataOnlyNodes( + dataNodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ) ); ensureStableCluster(15); ClusterHealthResponse health = client().admin() @@ -381,6 +430,20 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana tempZones.remove(originalClusterManagerZone); zoneToDecommission = randomFrom(tempZones); } + String activeNode; + switch (zoneToDecommission) { + case "a": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("b")), randomFrom(zoneToNodesMap.get("c"))); + break; + case "b": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("a")), randomFrom(zoneToNodesMap.get("c"))); + break; + case "c": + activeNode = randomFrom(randomFrom(zoneToNodesMap.get("a")), randomFrom(zoneToNodesMap.get("b"))); + break; + default: + throw new IllegalStateException("unexpected zone decommissioned"); + } logger.info("--> setting shard routing weights for weighted round robin"); Map weights = new HashMap<>(Map.of("a", 1.0, "b", 1.0, "c", 1.0)); @@ -401,9 +464,9 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); assertTrue(decommissionResponse.isAcknowledged()); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + ClusterState clusterState = client(activeNode).admin().cluster().prepareState().execute().actionGet().getState(); // assert that number of nodes should be 10 ( 2 cluster manager nodes + 8 data nodes ) assertEquals(clusterState.nodes().getNodes().size(), 10); @@ -422,7 +485,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana } // assert that decommission status is successful - GetDecommissionStateResponse response = client().execute( + GetDecommissionStateResponse response = client(activeNode).execute( GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest(decommissionAttribute.attributeName()) ).get(); @@ -432,7 +495,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana // assert that no node present in Voting Config Exclusion assertEquals(clusterState.metadata().coordinationMetadata().getVotingConfigExclusions().size(), 0); - String currentClusterManager = internalCluster().getClusterManagerName(); + String currentClusterManager = internalCluster().getClusterManagerName(activeNode); assertNotNull(currentClusterManager); if (originalClusterManagerDecommission) { // assert that cluster manager switched 
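
The `switch` above picks `activeNode` from a zone other than the one being decommissioned, so every follow-up request is routed to a node that will survive the decommission rather than to a node about to leave the cluster. An equivalent selection, sketched with stream filtering instead of an explicit switch; `pickActiveNode` is an illustrative helper, not part of the test framework:

```java
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.stream.Collectors;

public class ActiveNodeSelection {
    static String pickActiveNode(Map<String, List<String>> zoneToNodes, String zoneToDecommission) {
        // Collect nodes from every zone except the one being decommissioned.
        List<String> candidates = zoneToNodes.entrySet().stream()
            .filter(entry -> entry.getKey().equals(zoneToDecommission) == false)
            .flatMap(entry -> entry.getValue().stream())
            .collect(Collectors.toList());
        return candidates.get(new Random().nextInt(candidates.size()));
    }

    public static void main(String[] args) {
        Map<String, List<String>> zones = Map.of(
            "a", List.of("node-a1", "node-a2"),
            "b", List.of("node-b1", "node-b2"),
            "c", List.of("node-c1", "node-c2"));
        System.out.println(pickActiveNode(zones, "c")); // never a node in zone "c"
    }
}
```
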
during the test @@ -443,7 +506,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana } // Will wait for all events to complete - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // Recommissioning the zone back to gracefully succeed the test once above tests succeeds DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(currentClusterManager).execute( @@ -520,7 +583,7 @@ public void testDecommissionFailedWhenDifferentAttributeAlreadyDecommissioned() ); // Will wait for all events to complete - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client(node_in_c).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); // Recommissioning the zone back to gracefully succeed the test once above tests succeeds DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(node_in_c).execute( @@ -591,20 +654,21 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx assertTrue(weightedRoutingResponse.isAcknowledged()); logger.info("--> starting decommissioning nodes in zone {}", 'c'); + String activeNode = randomFrom(dataNodes.get(0), dataNodes.get(1)); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); // Set the timeout to 0 to do immediate Decommission DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); decommissionRequest.setNoDelay(true); - DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); + DecommissionResponse decommissionResponse = client(activeNode).execute(DecommissionAction.INSTANCE, decommissionRequest).get(); assertTrue(decommissionResponse.isAcknowledged()); // Will wait for all events to complete - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); logger.info("--> Received LANGUID event"); // assert that decommission status is successful - GetDecommissionStateResponse response = client(clusterManagerNodes.get(0)).execute( + GetDecommissionStateResponse response = client(activeNode).execute( GetDecommissionStateAction.INSTANCE, new GetDecommissionStateRequest(decommissionAttribute.attributeName()) ).get(); @@ -612,7 +676,7 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx assertEquals(DecommissionStatus.SUCCESSFUL, response.getDecommissionStatus()); logger.info("--> Decommission status is successful"); - ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); + ClusterState clusterState = client(activeNode).admin().cluster().prepareState().execute().actionGet().getState(); assertEquals(4, clusterState.nodes().getSize()); logger.info("--> Got cluster state with 4 nodes."); @@ -649,10 +713,10 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx logger.info("--> Verified the decommissioned node has in_progress state."); // Will wait for all events to complete - client(clusterManagerNodeAfterDecommission.getName()).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); + client(activeNode).admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); logger.info("--> Got 
LANGUID event"); // Recommissioning the zone back to gracefully succeed the test once above tests succeeds - DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodeAfterDecommission.getName()).execute( + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(activeNode).execute( DeleteDecommissionStateAction.INSTANCE, new DeleteDecommissionStateRequest() ).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java index 328bc14883208..9817861c88e9a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java @@ -116,8 +116,8 @@ public void testThrottlingForSingleNode() throws Exception { ActionListener listener = new ActionListener() { @Override public void onResponse(Object o) { - latch.countDown(); successfulRequest.incrementAndGet(); + latch.countDown(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index aa99155724661..73bc38be9c83a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -589,7 +589,7 @@ public void testIndexSearchAndRelocateConcurrently() throws Exception { logger.info(" --> checking iteration {}", i); SearchResponse afterRelocation = client().prepareSearch().setSize(ids.size()).get(); assertNoFailures(afterRelocation); - assertSearchHits(afterRelocation, ids.toArray(new String[ids.size()])); + assertSearchHits(afterRelocation, ids.toArray(new String[0])); } stopped.set(true); for (Thread searchThread : searchThreads) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java index 603a141abcaec..25eafc935be6e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/GeoDistanceIT.java @@ -147,7 +147,7 @@ public void setupSuiteScopeCluster() throws Exception { .setSource(jsonBuilder().startObject().field("value", i * 2).field("location", "52.0945, 5.116").endObject()) ); } - indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()])); + indexRandom(true, builders.toArray(new IndexRequestBuilder[0])); ensureSearchable(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java index 87968bd2117c6..bd0e69ca315ec 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/bucket/LongTermsIT.java @@ -176,7 +176,7 @@ public void setupSuiteScopeCluster() throws Exception { getMultiSortDocs(builders); - indexRandom(true, builders.toArray(new IndexRequestBuilder[builders.size()])); + indexRandom(true, builders.toArray(new IndexRequestBuilder[0])); ensureSearchable(); } diff 
--git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java index 22890620d6b15..0e62cfc6f8100 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MaxBucketIT.java @@ -162,7 +162,7 @@ public void testDocCountTopLevel() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } public void testDocCountAsSubAgg() throws Exception { @@ -214,7 +214,7 @@ public void testDocCountAsSubAgg() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } } @@ -254,7 +254,7 @@ public void testMetricTopLevel() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } public void testMetricAsSubAgg() throws Exception { @@ -313,7 +313,7 @@ public void testMetricAsSubAgg() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } } @@ -362,7 +362,7 @@ public void testMetricAsSubAggOfSingleBucketAgg() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } public void testMetricAsSubAggWithInsertZeros() throws Exception { @@ -419,7 +419,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_bucket")); assertThat(maxBucketValue.value(), equalTo(maxValue)); - assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[0]))); } } @@ -500,7 +500,7 @@ public void testNested() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_histo_bucket")); assertThat(maxBucketValue.value(), equalTo(maxHistoValue)); - assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[maxHistoKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[0]))); if (maxHistoValue > maxTermsValue) { maxTermsValue 
= maxHistoValue; maxTermsKeys = new ArrayList<>(); @@ -514,7 +514,7 @@ public void testNested() throws Exception { assertThat(maxBucketValue, notNullValue()); assertThat(maxBucketValue.getName(), equalTo("max_terms_bucket")); assertThat(maxBucketValue.value(), equalTo(maxTermsValue)); - assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[maxTermsKeys.size()]))); + assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[0]))); } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java index b3929943f0d02..5f7e5e5174254 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/aggregations/pipeline/MinBucketIT.java @@ -148,7 +148,7 @@ public void testDocCountTopLevel() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_bucket")); assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[0]))); } public void testDocCountAsSubAgg() throws Exception { @@ -200,7 +200,7 @@ public void testDocCountAsSubAgg() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_bucket")); assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[0]))); } } @@ -240,7 +240,7 @@ public void testMetricTopLevel() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_bucket")); assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[0]))); } public void testMetricAsSubAgg() throws Exception { @@ -299,7 +299,7 @@ public void testMetricAsSubAgg() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_bucket")); assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[0]))); } } @@ -357,7 +357,7 @@ public void testMetricAsSubAggWithInsertZeros() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_bucket")); assertThat(minBucketValue.value(), equalTo(minValue)); - assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[0]))); } } @@ -438,7 +438,7 @@ public void testNested() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_histo_bucket")); assertThat(minBucketValue.value(), equalTo(minHistoValue)); - assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[minHistoKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[0]))); if (minHistoValue < minTermsValue) { minTermsValue = 
minHistoValue; minTermsKeys = new ArrayList<>(); @@ -452,6 +452,6 @@ public void testNested() throws Exception { assertThat(minBucketValue, notNullValue()); assertThat(minBucketValue.getName(), equalTo("min_terms_bucket")); assertThat(minBucketValue.value(), equalTo(minTermsValue)); - assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[minTermsKeys.size()]))); + assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[0]))); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java index c184d876dcb33..1f1384cc5f72d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/basic/SearchWhileRelocatingIT.java @@ -85,7 +85,7 @@ private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throw ) ); } - indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()])); + indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[0])); assertHitCount(client().prepareSearch().get(), (numDocs)); final int numIters = scaledRandomIntBetween(5, 20); for (int i = 0; i < numIters; i++) { diff --git a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java index b3253b036bda6..b0695eaece781 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/morelikethis/MoreLikeThisIT.java @@ -767,7 +767,7 @@ public void testMoreLikeThisUnlike() throws ExecutionException, InterruptedExcep List docs = new ArrayList<>(numFields); for (int i = 0; i < numFields; i++) { docs.add(new Item("test", i + "")); - mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", doc) }).unlike(docs.toArray(new Item[docs.size()])) + mltQuery = moreLikeThisQuery(null, new Item[] { new Item("test", doc) }).unlike(docs.toArray(new Item[0])) .minTermFreq(0) .minDocFreq(0) .maxQueryTerms(100) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index ee9e162a34f5c..78cdfa88cea33 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -149,7 +149,7 @@ public void testBasicWorkFlow() throws Exception { } } if (!indicesToFlush.isEmpty()) { - String[] indices = indicesToFlush.toArray(new String[indicesToFlush.size()]); + String[] indices = indicesToFlush.toArray(new String[0]); logger.info("--> starting asynchronous flush for indices {}", Arrays.toString(indices)); flushResponseFuture = client().admin().indices().prepareFlush(indices).execute(); } @@ -1804,7 +1804,7 @@ public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); - assertAcked(client().admin().indices().prepareDelete(nbDocsPerIndex.keySet().toArray(new String[nbDocsPerIndex.size()]))); + 
assertAcked(client().admin().indices().prepareDelete(nbDocsPerIndex.keySet().toArray(new String[0]))); Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; diff --git a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java index 477f1a7d2f9ba..d1748d7f80995 100644 --- a/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java +++ b/server/src/main/java/org/apache/lucene/search/uhighlight/CustomFieldHighlighter.java @@ -163,7 +163,7 @@ protected Passage[] highlightOffsetsEnums(OffsetsEnum off) throws IOException { } while (off.nextPosition()); maybeAddPassage(passageQueue, passageScorer, passage, contentLength); - Passage[] passages = passageQueue.toArray(new Passage[passageQueue.size()]); + Passage[] passages = passageQueue.toArray(new Passage[0]); // sort in ascending order Arrays.sort(passages, Comparator.comparingInt(Passage::getStartOffset)); return passages; diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index bb1186575cd01..4e667d0a9f3a5 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -655,8 +655,8 @@ public static OpenSearchException[] guessRootCauses(Throwable t) { * parsing exception because that is generally the most interesting * exception to return to the user. If that exception is caused by * an OpenSearchException we'd like to keep unwrapping because - * ElasticserachExceptions tend to contain useful information for - * the user. + * OpenSearchException instances tend to contain useful information + * for the user. */ Throwable cause = ex.getCause(); if (cause != null) { diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index b48384e9439ec..a5f181e0bfbf2 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -86,6 +86,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_1 = new Version(2030199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_2_4_1 = new Version(2040199, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/action/ThreadingModel.java b/server/src/main/java/org/opensearch/action/ThreadingModel.java deleted file mode 100644 index 11c61152e5107..0000000000000 --- a/server/src/main/java/org/opensearch/action/ThreadingModel.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action; - -/** - * Threading model - * - * @opensearch.internal - */ -public enum ThreadingModel { - NONE((byte) 0), - OPERATION((byte) 1), - LISTENER((byte) 2), - OPERATION_LISTENER((byte) 3); - - private byte id; - - ThreadingModel(byte id) { - this.id = id; - } - - public byte id() { - return this.id; - } - - /** - * {@code true} if the actual operation the action represents will be executed - * on a different thread than the calling thread (assuming it will be executed - * on the same node). - */ - public boolean threadedOperation() { - return this == OPERATION || this == OPERATION_LISTENER; - } - - /** - * {@code true} if the invocation of the action result listener will be executed - * on a different thread (than the calling thread or an "expensive" thread, like the - * IO thread). - */ - public boolean threadedListener() { - return this == LISTENER || this == OPERATION_LISTENER; - } - - public ThreadingModel addListener() { - if (this == NONE) { - return LISTENER; - } - if (this == OPERATION) { - return OPERATION_LISTENER; - } - return this; - } - - public ThreadingModel removeListener() { - if (this == LISTENER) { - return NONE; - } - if (this == OPERATION_LISTENER) { - return OPERATION; - } - return this; - } - - public ThreadingModel addOperation() { - if (this == NONE) { - return OPERATION; - } - if (this == LISTENER) { - return OPERATION_LISTENER; - } - return this; - } - - public ThreadingModel removeOperation() { - if (this == OPERATION) { - return NONE; - } - if (this == OPERATION_LISTENER) { - return LISTENER; - } - return this; - } - - public static ThreadingModel fromId(byte id) { - if (id == 0) { - return NONE; - } else if (id == 1) { - return OPERATION; - } else if (id == 2) { - return LISTENER; - } else if (id == 3) { - return OPERATION_LISTENER; - } else { - throw new IllegalArgumentException("No threading model for [" + id + "]"); - } - } -} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java index a77477f24aa9f..1735827d259db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java @@ -86,7 +86,7 @@ public RestoreRemoteStoreRequest indices(String... 
indices) { * @return this request */ public RestoreRemoteStoreRequest indices(List indices) { - this.indices = indices.toArray(new String[indices.size()]); + this.indices = indices.toArray(new String[0]); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index cb64718ed5843..42bfa5dd0da83 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -253,7 +253,7 @@ public CreateSnapshotRequest indices(String... indices) { * @return this request */ public CreateSnapshotRequest indices(List indices) { - this.indices = indices.toArray(new String[indices.size()]); + this.indices = indices.toArray(new String[0]); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 4f998b3484642..b019bc9eff476 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -262,7 +262,7 @@ public RestoreSnapshotRequest indices(String... indices) { * @return this request */ public RestoreSnapshotRequest indices(List indices) { - this.indices = indices.toArray(new String[indices.size()]); + this.indices = indices.toArray(new String[0]); return this; } @@ -392,7 +392,7 @@ public RestoreSnapshotRequest ignoreIndexSettings(String... 
ignoreIndexSettings) * Sets the list of index settings and index settings groups that shouldn't be restored from snapshot */ public RestoreSnapshotRequest ignoreIndexSettings(List ignoreIndexSettings) { - this.ignoreIndexSettings = ignoreIndexSettings.toArray(new String[ignoreIndexSettings.size()]); + this.ignoreIndexSettings = ignoreIndexSettings.toArray(new String[0]); return this; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 384eb7a0b86bb..1efd7d8192019 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -202,13 +202,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } - return new ClusterStatsNodeResponse( - nodeInfo.getNode(), - clusterStatus, - nodeInfo, - nodeStats, - shardsStats.toArray(new ShardStats[shardsStats.size()]) - ); + return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[0])); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 3e453b42c3d7c..0ab9faa94090a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -122,7 +122,7 @@ protected ClusterBlockException checkBlock(IndicesAliasesRequest request, Cluste for (IndicesAliasesRequest.AliasActions aliasAction : request.aliasActions()) { Collections.addAll(indices, aliasAction.indices()); } - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices.toArray(new String[indices.size()])); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices.toArray(new String[0])); } @Override @@ -195,7 +195,7 @@ protected void clusterManagerOperation( } } if (finalActions.isEmpty() && false == actions.isEmpty()) { - throw new AliasesNotFoundException(aliases.toArray(new String[aliases.size()])); + throw new AliasesNotFoundException(aliases.toArray(new String[0])); } request.aliasActions().clear(); IndicesAliasesClusterStateUpdateRequest updateRequest = new IndicesAliasesClusterStateUpdateRequest(unmodifiableList(finalActions)) @@ -227,7 +227,7 @@ private static String[] concreteAliases(IndicesAliasesRequest.AliasActions actio finalAliases.add(aliasMeta.alias()); } } - return finalAliases.toArray(new String[finalAliases.size()]); + return finalAliases.toArray(new String[0]); } else { // for ADD and REMOVE_INDEX we just return the current aliases return action.aliases(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index a3d14846338e7..bcf2f6f534d74 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -128,7 +128,7 @@ protected 
void clusterManagerOperation( DeleteIndexClusterStateUpdateRequest deleteRequest = new DeleteIndexClusterStateUpdateRequest().ackTimeout(request.timeout()) .masterNodeTimeout(request.clusterManagerNodeTimeout()) - .indices(concreteIndices.toArray(new Index[concreteIndices.size()])); + .indices(concreteIndices.toArray(new Index[0])); deleteIndexService.deleteIndices(deleteRequest, new ActionListener() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java index 88973ce094d8b..7249bc5e9d3ba 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndexSegments.java @@ -65,10 +65,7 @@ public class IndexSegments implements Iterable { for (Map.Entry> entry : tmpIndexShards.entrySet()) { indexShards.put( entry.getKey(), - new IndexShardSegments( - entry.getValue().get(0).getShardRouting().shardId(), - entry.getValue().toArray(new ShardSegments[entry.getValue().size()]) - ) + new IndexShardSegments(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardSegments[0])) ); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java index e21616657502a..f2fef57c64a25 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/IndicesSegmentResponse.java @@ -97,7 +97,7 @@ public Map getIndices() { shards.add(shard); } } - indicesSegments.put(indexName, new IndexSegments(indexName, shards.toArray(new ShardSegments[shards.size()]))); + indicesSegments.put(indexName, new IndexSegments(indexName, shards.toArray(new ShardSegments[0]))); } this.indicesSegments = indicesSegments; return indicesSegments; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java index de0d390cddc4a..6e943cd9e5e7b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/PitSegmentsRequest.java @@ -64,7 +64,7 @@ public void setVerbose(boolean v) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeStringArrayNullable((pitIds == null) ? null : pitIds.toArray(new String[pitIds.size()])); + out.writeStringArrayNullable((pitIds == null) ? 
null : pitIds.toArray(new String[0])); out.writeBoolean(verbose); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java index eb3d64188a6e7..163bd745cf029 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportIndicesSegmentsAction.java @@ -119,7 +119,7 @@ protected IndicesSegmentResponse newResponse( ClusterState clusterState ) { return new IndicesSegmentResponse( - results.toArray(new ShardSegments[results.size()]), + results.toArray(new ShardSegments[0]), totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java index 9d4ece74a7270..09a685a2158f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/segments/TransportPitSegmentsAction.java @@ -167,7 +167,7 @@ protected IndicesSegmentResponse newResponse( ClusterState clusterState ) { return new IndicesSegmentResponse( - results.toArray(new ShardSegments[results.size()]), + results.toArray(new ShardSegments[0]), totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 62ebbba500a6a..6b819ac34cef9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -146,7 +146,7 @@ public boolean anySet() { } public Flag[] getFlags() { - return flags.toArray(new Flag[flags.size()]); + return flags.toArray(new Flag[0]); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java index c98d46a0caed6..1c57ca39576b0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/IndexStats.java @@ -88,10 +88,7 @@ public Map getIndexShards() { for (Map.Entry> entry : tmpIndexShards.entrySet()) { indexShards.put( entry.getKey(), - new IndexShardStats( - entry.getValue().get(0).getShardRouting().shardId(), - entry.getValue().toArray(new ShardStats[entry.getValue().size()]) - ) + new IndexShardStats(entry.getValue().get(0).getShardRouting().shardId(), entry.getValue().toArray(new ShardStats[0])) ); } return indexShards; @@ -153,7 +150,7 @@ public IndexStatsBuilder add(ShardStats shardStats) { } public IndexStats build() { - return new IndexStats(indexName, uuid, shards.toArray(new ShardStats[shards.size()])); + return new IndexStats(indexName, uuid, shards.toArray(new ShardStats[0])); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java index 39e0338aac5f6..429c6630c6c06 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -120,13 +120,7 @@ protected IndicesStatsResponse newResponse( List shardFailures, ClusterState clusterState ) { - return new IndicesStatsResponse( - responses.toArray(new ShardStats[responses.size()]), - totalShards, - successfulShards, - failedShards, - shardFailures - ); + return new IndicesStatsResponse(responses.toArray(new ShardStats[0]), totalShards, successfulShards, failedShards, shardFailures); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 3cfb1a3ed0637..d5cdfe41045dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -476,7 +476,7 @@ public PutIndexTemplateRequest alias(Alias alias) { @Override public String[] indices() { - return indexPatterns.toArray(new String[indexPatterns.size()]); + return indexPatterns.toArray(new String[0]); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java index 2cff1f04d3fd2..4513a321e2a51 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/IndexUpgradeStatus.java @@ -67,7 +67,7 @@ public class IndexUpgradeStatus implements Iterable { entry.getKey(), new IndexShardUpgradeStatus( entry.getValue().get(0).getShardRouting().shardId(), - entry.getValue().toArray(new ShardUpgradeStatus[entry.getValue().size()]) + entry.getValue().toArray(new ShardUpgradeStatus[0]) ) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java index b2f6cd62b1be7..c81d86660ae50 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/TransportUpgradeStatusAction.java @@ -121,7 +121,7 @@ protected UpgradeStatusResponse newResponse( ClusterState clusterState ) { return new UpgradeStatusResponse( - responses.toArray(new ShardUpgradeStatus[responses.size()]), + responses.toArray(new ShardUpgradeStatus[0]), totalShards, successfulShards, failedShards, diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java index 311f86b0fe3b9..0891597b3f2b5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/get/UpgradeStatusResponse.java @@ -94,7 +94,7 @@ public Map getIndices() { shards.add(shard); } } - indicesUpgradeStats.put(indexName, new IndexUpgradeStatus(indexName, shards.toArray(new ShardUpgradeStatus[shards.size()]))); + indicesUpgradeStats.put(indexName, new IndexUpgradeStatus(indexName, 
shards.toArray(new ShardUpgradeStatus[0]))); } this.indicesUpgradeStatus = indicesUpgradeStats; return indicesUpgradeStats; diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java index 6dec144f2ccf0..3489f8a90d6e4 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkResponse.java @@ -214,6 +214,6 @@ public static BulkResponse fromXContent(XContentParser parser) throws IOExceptio throwUnknownToken(token, parser.getTokenLocation()); } } - return new BulkResponse(items.toArray(new BulkItemResponse[items.size()]), took, ingestTook); + return new BulkResponse(items.toArray(new BulkItemResponse[0]), took, ingestTook); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 9e23213c02ab6..862d63aeedf53 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -609,7 +609,7 @@ protected void doRun() { BulkShardRequest bulkShardRequest = new BulkShardRequest( shardId, bulkRequest.getRefreshPolicy(), - requests.toArray(new BulkItemRequest[requests.size()]) + requests.toArray(new BulkItemRequest[0]) ); bulkShardRequest.waitForActiveShards(bulkRequest.waitForActiveShards()); bulkShardRequest.timeout(bulkRequest.timeout()); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 91f506dafafe1..87881071d4ec8 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -509,15 +509,15 @@ private static void parseDocuments( fetchSourceContext = new FetchSourceContext( fetchSourceContext.fetchSource(), - includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[includes.size()]), - excludes == null ? Strings.EMPTY_ARRAY : excludes.toArray(new String[excludes.size()]) + includes == null ? Strings.EMPTY_ARRAY : includes.toArray(new String[0]), + excludes == null ? 
Strings.EMPTY_ARRAY : excludes.toArray(new String[0]) ); } } } String[] aFields; if (storedFields != null) { - aFields = storedFields.toArray(new String[storedFields.size()]); + aFields = storedFields.toArray(new String[0]); } else { aFields = defaultFields; } diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java index 4105f5ece5221..ebc110302143d 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollRequest.java @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { if (scrollIds == null) { out.writeVInt(0); } else { - out.writeStringArray(scrollIds.toArray(new String[scrollIds.size()])); + out.writeStringArray(scrollIds.toArray(new String[0])); } } diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 926e9c19a33f5..da4f2a5f77003 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -74,7 +74,7 @@ public void writeTo(StreamOutput out) throws IOException { if (pitIds == null) { out.writeVInt(0); } else { - out.writeStringArray(pitIds.toArray(new String[pitIds.size()])); + out.writeStringArray(pitIds.toArray(new String[0])); } } diff --git a/server/src/main/java/org/opensearch/action/search/PitService.java b/server/src/main/java/org/opensearch/action/search/PitService.java index f42d84477f9a3..079f91db383d1 100644 --- a/server/src/main/java/org/opensearch/action/search/PitService.java +++ b/server/src/main/java/org/opensearch/action/search/PitService.java @@ -176,7 +176,7 @@ public void getAllPits(ActionListener getAllPitsListener DiscoveryNode node = cursor.value; nodes.add(node); } - DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]); + DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[0]); GetAllPitNodesRequest getAllPitNodesRequest = new GetAllPitNodesRequest(disNodesArr); transportService.sendRequest( transportService.getLocalNode(), diff --git a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java index ffa944ae62763..0b477624b15cc 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/opensearch/action/search/SearchScrollAsyncAction.java @@ -243,7 +243,7 @@ synchronized ShardSearchFailure[] buildShardFailures() { // pkg private for test if (shardFailures.isEmpty()) { return ShardSearchFailure.EMPTY_ARRAY; } - return shardFailures.toArray(new ShardSearchFailure[shardFailures.size()]); + return shardFailures.toArray(new ShardSearchFailure[0]); } // we do our best to return the shard failures, but its ok if its not fully concurrently safe diff --git a/server/src/main/java/org/opensearch/action/support/ActionFilters.java b/server/src/main/java/org/opensearch/action/support/ActionFilters.java index 95cafbea4d7c1..cad828e08a25e 100644 --- a/server/src/main/java/org/opensearch/action/support/ActionFilters.java +++ b/server/src/main/java/org/opensearch/action/support/ActionFilters.java @@ -46,7 +46,7 @@ public class ActionFilters { private final ActionFilter[] filters; public ActionFilters(Set actionFilters) { - this.filters = 
actionFilters.toArray(new ActionFilter[actionFilters.size()]); + this.filters = actionFilters.toArray(new ActionFilter[0]); Arrays.sort(filters, new Comparator() { @Override public int compare(ActionFilter o1, ActionFilter o2) { diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java index 9d0973a5f7307..2135df4fdf726 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastResponse.java @@ -114,7 +114,7 @@ public BroadcastResponse( if (shardFailures == null) { this.shardFailures = EMPTY; } else { - this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[shardFailures.size()]); + this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[0]); } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java index 1dfa9e53c5fee..2b4504c94f594 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationResponse.java @@ -222,7 +222,7 @@ public static ShardInfo fromXContent(XContentParser parser) throws IOException { } Failure[] failures = EMPTY; if (failuresList != null) { - failures = failuresList.toArray(new Failure[failuresList.size()]); + failures = failuresList.toArray(new Failure[0]); } return new ShardInfo(total, successful, failures); } diff --git a/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java b/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java deleted file mode 100644 index b9898e79e3e6f..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/MergableCustomMetadata.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cluster; - -import org.opensearch.cluster.metadata.Metadata; - -/** - * Interface to allow merging {@link org.opensearch.cluster.metadata.Metadata.Custom}. - * When multiple Mergable Custom metadata of the same type are found (from underlying clusters), the - * Custom metadata can be merged using {@link #merge(Metadata.Custom)}. 
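The dominant change across these files swaps presized `collection.toArray(new T[collection.size()])` for `collection.toArray(new T[0])`. On modern HotSpot the zero-length form is generally as fast or faster, because the collection allocates the correctly sized result internally, and it avoids the window where a concurrent collection changes size between `size()` and `toArray()`. A minimal before/after sketch:

```java
import java.util.List;

public class ToArrayIdiom {
    public static void main(String[] args) {
        List<String> indices = List.of("logs-1", "logs-2");

        // Old: presize the destination; the collection copies into it.
        String[] presized = indices.toArray(new String[indices.size()]);

        // New: pass a zero-length array as a type token; the collection
        // allocates and fills a correctly sized result in one step.
        String[] zeroSized = indices.toArray(new String[0]);

        System.out.println(presized.length + " " + zeroSized.length); // 2 2
    }
}
```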
- * - * @param type of custom meta data - * - * @opensearch.internal - */ -public interface MergableCustomMetadata { - - /** - * Merges this custom metadata with other, returning either this or other custom metadata. - * This method should not mutate either this or the other custom metadata. - * - * @param other custom meta data - * @return the same instance or other custom metadata based on implementation - * if both the instances are considered equal, implementations should return this - * instance to avoid redundant cluster state changes. - */ - T merge(T other); -} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index a66152b8016ee..010e9f47ed39b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -142,7 +142,7 @@ public class JoinHelper { this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); this.nodeCommissioned = nodeCommissioned; - this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { + this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService) { private final long term = currentTermSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 4cb6c7b255449..02f3828e0e4c5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -48,7 +48,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.persistent.PersistentTasksCustomMetadata; -import org.opensearch.transport.TransportService; import java.util.ArrayList; import java.util.Collection; @@ -74,7 +73,6 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor concreteIndices, String[] originalPatterns) { @@ -598,7 +598,7 @@ public String[] indexAliases( if (aliases == null) { return null; } - return aliases.toArray(new String[aliases.size()]); + return aliases.toArray(new String[0]); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java index 17267d5474738..118d2998bae67 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingNodes.java @@ -1139,7 +1139,7 @@ public void shuffle() { */ public ShardRouting[] drain() { nodes.ensureMutable(); - ShardRouting[] mutableShardRoutings = unassigned.toArray(new ShardRouting[unassigned.size()]); + ShardRouting[] mutableShardRoutings = unassigned.toArray(new ShardRouting[0]); unassigned.clear(); primaries = 0; return mutableShardRoutings; @@ -1151,7 +1151,7 @@ public ShardRouting[] drain() { */ public ShardRouting[] drainIgnored() { nodes.ensureMutable(); - ShardRouting[] mutableShardRoutings = ignored.toArray(new ShardRouting[ignored.size()]); + ShardRouting[] mutableShardRoutings = ignored.toArray(new ShardRouting[0]); ignored.clear(); ignoredPrimaries = 0; return mutableShardRoutings; diff --git 
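The removed MergableCustomMetadata javadoc pins down a contract worth noting: merge() must not mutate either operand, and it should return this when the two are considered equal, so equal metadata never triggers a redundant cluster state update. A hedged sketch of that contract with an illustrative type (Flags is not from the source):

```java
import java.util.HashSet;
import java.util.Set;

final class Flags {
    final Set<String> values;

    Flags(Set<String> values) {
        this.values = Set.copyOf(values); // defensive copy: merge() never mutates
    }

    Flags merge(Flags other) {
        if (values.equals(other.values)) {
            return this; // equal inputs: same instance, no redundant state change
        }
        Set<String> union = new HashSet<>(values);
        union.addAll(other.values);
        return new Flags(union);
    }

    public static void main(String[] args) {
        Flags a = new Flags(Set.of("x"));
        Flags b = new Flags(Set.of("x"));
        System.out.println(a.merge(b) == a); // true: no state change published
    }
}
```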
a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java index 1c6e4732a2ab7..5169e63aeb9a5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/AllocationService.java @@ -345,7 +345,7 @@ public ClusterState adaptAutoExpandReplicas(ClusterState clusterState) { final Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); for (Map.Entry> entry : autoExpandReplicaChanges.entrySet()) { final int numberOfReplicas = entry.getKey(); - final String[] indices = entry.getValue().toArray(new String[entry.getValue().size()]); + final String[] indices = entry.getValue().toArray(new String[0]); // we do *not* update the in sync allocation ids as they will be removed upon the first index // operation which make these copies stale routingTableBuilder.updateNumberOfReplicas(numberOfReplicas, indices); diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 3c5e4013748af..8570a16fd690c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -90,7 +90,7 @@ public LocalShardsBalancer( * Returns an array view on the nodes in the balancer. Nodes should not be removed from this list. */ private BalancedShardsAllocator.ModelNode[] nodesArray() { - return nodes.values().toArray(new BalancedShardsAllocator.ModelNode[nodes.size()]); + return nodes.values().toArray(new BalancedShardsAllocator.ModelNode[0]); } /** @@ -778,7 +778,7 @@ void allocateUnassigned() { .collect(Collectors.toList()); allUnassignedShards.removeAll(localUnassignedShards); allUnassignedShards.forEach(shard -> routingNodes.unassigned().add(shard)); - unassignedShards = localUnassignedShards.toArray(new ShardRouting[localUnassignedShards.size()]); + unassignedShards = localUnassignedShards.toArray(new ShardRouting[0]); } ShardRouting[] primary = unassignedShards; ShardRouting[] secondary = new ShardRouting[primary.length]; diff --git a/server/src/main/java/org/opensearch/common/Classes.java b/server/src/main/java/org/opensearch/common/Classes.java index 1b297639aff6a..1fb7fde5f963b 100644 --- a/server/src/main/java/org/opensearch/common/Classes.java +++ b/server/src/main/java/org/opensearch/common/Classes.java @@ -41,25 +41,6 @@ */ public class Classes { - /** - * The package separator character '.' - */ - private static final char PACKAGE_SEPARATOR = '.'; - - /** - * Determine the name of the package of the given class: - * e.g. "java.lang" for the java.lang.String class. - * - * @param clazz the class - * @return the package name, or the empty String if the class - * is defined in the default package - */ - public static String getPackageName(Class clazz) { - String className = clazz.getName(); - int lastDotIndex = className.lastIndexOf(PACKAGE_SEPARATOR); - return (lastDotIndex != -1 ? 
className.substring(0, lastDotIndex) : ""); - } - public static boolean isInnerClass(Class clazz) { return !Modifier.isStatic(clazz.getModifiers()) && clazz.getEnclosingClass() != null; } diff --git a/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java b/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java deleted file mode 100644 index 1e2d9b87281d6..0000000000000 --- a/server/src/main/java/org/opensearch/common/LegacyTimeBasedUUIDGenerator.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common; - -import java.util.Base64; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * These are essentially flake ids, but we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. - * For more information about flake ids, check out - * https://archive.fo/2015.07.08-082503/http://www.boundary.com/blog/2012/01/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang/ - * - * @opensearch.internal - */ - -class LegacyTimeBasedUUIDGenerator implements UUIDGenerator { - - // We only use bottom 3 bytes for the sequence number. Paranoia: init with random int so that if JVM/OS/machine goes down, clock slips - // backwards, and JVM comes back up, we are less likely to be on the same sequenceNumber at the same time: - private final AtomicInteger sequenceNumber = new AtomicInteger(SecureRandomHolder.INSTANCE.nextInt()); - - // Used to ensure clock moves forward: - private long lastTimestamp; - - private static final byte[] SECURE_MUNGED_ADDRESS = MacAddressProvider.getSecureMungedAddress(); - - static { - assert SECURE_MUNGED_ADDRESS.length == 6; - } - - /** Puts the lower numberOfLongBytes from l into the array, starting index pos. */ - private static void putLong(byte[] array, long l, int pos, int numberOfLongBytes) { - for (int i = 0; i < numberOfLongBytes; ++i) { - array[pos + numberOfLongBytes - i - 1] = (byte) (l >>> (i * 8)); - } - } - - @Override - public String getBase64UUID() { - final int sequenceId = sequenceNumber.incrementAndGet() & 0xffffff; - long timestamp = System.currentTimeMillis(); - - synchronized (this) { - // Don't let timestamp go backwards, at least "on our watch" (while this JVM is running). We are still vulnerable if we are - // shut down, clock goes backwards, and we restart... 
for this we randomize the sequenceNumber on init to decrease chance of - // collision: - timestamp = Math.max(lastTimestamp, timestamp); - - if (sequenceId == 0) { - // Always force the clock to increment whenever sequence number is 0, in case we have a long time-slip backwards: - timestamp++; - } - - lastTimestamp = timestamp; - } - - final byte[] uuidBytes = new byte[15]; - - // Only use lower 6 bytes of the timestamp (this will suffice beyond the year 10000): - putLong(uuidBytes, timestamp, 0, 6); - - // MAC address adds 6 bytes: - System.arraycopy(SECURE_MUNGED_ADDRESS, 0, uuidBytes, 6, SECURE_MUNGED_ADDRESS.length); - - // Sequence number adds 3 bytes: - putLong(uuidBytes, sequenceId, 12, 3); - - assert 9 + SECURE_MUNGED_ADDRESS.length == uuidBytes.length; - - return Base64.getUrlEncoder().withoutPadding().encodeToString(uuidBytes); - } -} diff --git a/server/src/main/java/org/opensearch/common/Numbers.java b/server/src/main/java/org/opensearch/common/Numbers.java index 7a87cd58b0e29..dbcde890e8fe2 100644 --- a/server/src/main/java/org/opensearch/common/Numbers.java +++ b/server/src/main/java/org/opensearch/common/Numbers.java @@ -57,28 +57,6 @@ public static long bytesToLong(BytesRef bytes) { return (((long) high) << 32) | (low & 0x0ffffffffL); } - public static byte[] intToBytes(int val) { - byte[] arr = new byte[4]; - arr[0] = (byte) (val >>> 24); - arr[1] = (byte) (val >>> 16); - arr[2] = (byte) (val >>> 8); - arr[3] = (byte) (val); - return arr; - } - - /** - * Converts an int to a byte array. - * - * @param val The int to convert to a byte array - * @return The byte array converted - */ - public static byte[] shortToBytes(int val) { - byte[] arr = new byte[2]; - arr[0] = (byte) (val >>> 8); - arr[1] = (byte) (val); - return arr; - } - /** * Converts a long to a byte array. * @@ -98,16 +76,6 @@ public static byte[] longToBytes(long val) { return arr; } - /** - * Converts a double to a byte array. - * - * @param val The double to convert to a byte array - * @return The byte array converted - */ - public static byte[] doubleToBytes(double val) { - return longToBytes(Double.doubleToRawLongBits(val)); - } - /** Returns true if value is neither NaN nor infinite. 
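The removed generator's layout is worth spelling out: 6 low-order bytes of the timestamp, 6 bytes of munged MAC address, and a 3-byte sequence number give a 15-byte id, which Base64-encodes to exactly 20 characters with no padding (15 is divisible by 3). A sketch of the packing, mirroring the removed putLong (the MAC bytes are zeroed stand-ins here):

```java
import java.util.Base64;

public class FlakeIdSketch {
    // Big-endian pack of the low numberOfBytes of l, mirroring the removed putLong().
    static void putLong(byte[] array, long l, int pos, int numberOfBytes) {
        for (int i = 0; i < numberOfBytes; ++i) {
            array[pos + numberOfBytes - i - 1] = (byte) (l >>> (i * 8));
        }
    }

    public static void main(String[] args) {
        byte[] uuid = new byte[15];
        putLong(uuid, System.currentTimeMillis(), 0, 6); // 6-byte timestamp
        // bytes 6..11 would hold the secure munged MAC address
        putLong(uuid, 0x123456, 12, 3);                  // 3-byte sequence number
        System.out.println(Base64.getUrlEncoder().withoutPadding().encodeToString(uuid));
    }
}
```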
*/ public static boolean isValidDouble(double value) { if (Double.isNaN(value) || Double.isInfinite(value)) { diff --git a/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java b/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java index fdc53d8335c2f..f83ef930688f8 100644 --- a/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java +++ b/server/src/main/java/org/opensearch/common/RandomBasedUUIDGenerator.java @@ -32,9 +32,6 @@ package org.opensearch.common; -import org.opensearch.common.settings.SecureString; - -import java.util.Arrays; import java.util.Base64; import java.util.Random; @@ -54,27 +51,6 @@ public String getBase64UUID() { return getBase64UUID(SecureRandomHolder.INSTANCE); } - /** - * Returns a Base64 encoded {@link SecureString} of a Version 4.0 compatible UUID - * as defined here: http://www.ietf.org/rfc/rfc4122.txt - */ - public SecureString getBase64UUIDSecureString() { - byte[] uuidBytes = null; - byte[] encodedBytes = null; - try { - uuidBytes = getUUIDBytes(SecureRandomHolder.INSTANCE); - encodedBytes = Base64.getUrlEncoder().withoutPadding().encode(uuidBytes); - return new SecureString(CharArrays.utf8BytesToChars(encodedBytes)); - } finally { - if (uuidBytes != null) { - Arrays.fill(uuidBytes, (byte) 0); - } - if (encodedBytes != null) { - Arrays.fill(encodedBytes, (byte) 0); - } - } - } - /** * Returns a Base64 encoded version of a Version 4.0 compatible UUID * randomly initialized by the given {@link java.util.Random} instance diff --git a/server/src/main/java/org/opensearch/common/Strings.java b/server/src/main/java/org/opensearch/common/Strings.java index cbf12f264ee30..7ec053522c5a6 100644 --- a/server/src/main/java/org/opensearch/common/Strings.java +++ b/server/src/main/java/org/opensearch/common/Strings.java @@ -80,67 +80,6 @@ public static void spaceify(int spaces, String from, StringBuilder to) throws Ex } } - /** - * Splits a backslash escaped string on the separator. - *
- * Current backslash escaping supported: - * \n \t \r \b \f are escaped the same as a Java String - *
Other characters following a backslash are produced verbatim (\c => c) - * - * @param s the string to split - * @param separator the separator to split on - * @param decode decode backslash escaping - */ - public static List splitSmart(String s, String separator, boolean decode) { - ArrayList lst = new ArrayList<>(2); - StringBuilder sb = new StringBuilder(); - int pos = 0, end = s.length(); - while (pos < end) { - if (s.startsWith(separator, pos)) { - if (sb.length() > 0) { - lst.add(sb.toString()); - sb = new StringBuilder(); - } - pos += separator.length(); - continue; - } - - char ch = s.charAt(pos++); - if (ch == '\\') { - if (!decode) sb.append(ch); - if (pos >= end) break; // ERROR, or let it go? - ch = s.charAt(pos++); - if (decode) { - switch (ch) { - case 'n': - ch = '\n'; - break; - case 't': - ch = '\t'; - break; - case 'r': - ch = '\r'; - break; - case 'b': - ch = '\b'; - break; - case 'f': - ch = '\f'; - break; - } - } - } - - sb.append(ch); - } - - if (sb.length() > 0) { - lst.add(sb.toString()); - } - - return lst; - } - // --------------------------------------------------------------------- // General convenience methods for working with Strings // --------------------------------------------------------------------- @@ -303,7 +242,7 @@ public static String replace(String inString, String oldPattern, String newPatte // the index of an occurrence we've found, or -1 int patLen = oldPattern.length(); while (index >= 0) { - sb.append(inString.substring(pos, index)); + sb.append(inString, pos, index); sb.append(newPattern); pos = index + patLen; index = inString.indexOf(oldPattern, pos); @@ -423,7 +362,7 @@ public static String[] toStringArray(Collection collection) { if (collection == null) { return null; } - return collection.toArray(new String[collection.size()]); + return collection.toArray(new String[0]); } /** @@ -875,10 +814,6 @@ public static boolean isNullOrEmpty(@Nullable String s) { return s == null || s.isEmpty(); } - public static String coalesceToEmpty(@Nullable String s) { - return s == null ? "" : s; - } - public static String padStart(String s, int minimumLength, char c) { if (s == null) { throw new NullPointerException("s"); diff --git a/server/src/main/java/org/opensearch/common/UUIDs.java b/server/src/main/java/org/opensearch/common/UUIDs.java index a04a10430254f..c7d14878e8bd4 100644 --- a/server/src/main/java/org/opensearch/common/UUIDs.java +++ b/server/src/main/java/org/opensearch/common/UUIDs.java @@ -32,8 +32,6 @@ package org.opensearch.common; -import org.opensearch.common.settings.SecureString; - import java.util.Random; /** @@ -44,7 +42,6 @@ public class UUIDs { private static final RandomBasedUUIDGenerator RANDOM_UUID_GENERATOR = new RandomBasedUUIDGenerator(); - private static final UUIDGenerator LEGACY_TIME_UUID_GENERATOR = new LegacyTimeBasedUUIDGenerator(); private static final UUIDGenerator TIME_UUID_GENERATOR = new TimeBasedUUIDGenerator(); /** Generates a time-based UUID (similar to Flake IDs), which is preferred when generating an ID to be indexed into a Lucene index as @@ -53,11 +50,6 @@ public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } - /** Legacy implementation of {@link #base64UUID()}, for pre 6.0 indices. 
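A smaller recurring cleanup appears in Strings.replace here (and again in LoggerMessageFormat below): `sb.append(inString, pos, index)` copies the character range directly, where the old `sb.append(inString.substring(pos, index))` first allocated a throwaway String. A quick sketch:

```java
public class AppendRangeSketch {
    public static void main(String[] args) {
        String in = "hello brave new world";
        StringBuilder sb = new StringBuilder();

        sb.append(in.substring(6, 11)); // old: allocates the substring "brave"
        sb.append(in, 12, 15);          // new: copies chars 12..14 ("new") directly

        System.out.println(sb); // bravenew
    }
}
```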
*/ - public static String legacyBase64UUID() { - return LEGACY_TIME_UUID_GENERATOR.getBase64UUID(); - } - /** Returns a Base64 encoded version of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, using the * provided {@code Random} instance */ public static String randomBase64UUID(Random random) { @@ -70,9 +62,4 @@ public static String randomBase64UUID() { return RANDOM_UUID_GENERATOR.getBase64UUID(); } - /** Returns a Base64 encoded {@link SecureString} of a Version 4.0 compatible UUID as defined here: http://www.ietf.org/rfc/rfc4122.txt, - * using a private {@code SecureRandom} instance */ - public static SecureString randomBase64UUIDSecureString() { - return RANDOM_UUID_GENERATOR.getBase64UUIDSecureString(); - } } diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java index d5242fd5e7347..c54536e9c46e2 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobPath.java @@ -68,7 +68,7 @@ public Iterator iterator() { } public String[] toArray() { - return paths.toArray(new String[paths.size()]); + return paths.toArray(new String[0]); } public BlobPath add(String path) { diff --git a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java index 3e0623bf8d128..85dcf949d479e 100644 --- a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java @@ -91,7 +91,7 @@ static ByteBuffer[] toByteBuffers(BytesReference reference) { while ((r = byteRefIterator.next()) != null) { buffers.add(ByteBuffer.wrap(r.bytes, r.offset, r.length)); } - return buffers.toArray(new ByteBuffer[buffers.size()]); + return buffers.toArray(new ByteBuffer[0]); } catch (IOException e) { // this is really an error since we don't do IO in our bytesreferences diff --git a/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java b/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java index bf2192a28a299..797b252215d76 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoPolygonDecomposer.java @@ -463,7 +463,7 @@ private static Edge[] edges(Edge[] edges, int numHoles, List> comp } } - return mainEdges.toArray(new Edge[mainEdges.size()]); + return mainEdges.toArray(new Edge[0]); } private static void compose(Edge[] edges, Edge[] holes, int numHoles, List collector) { diff --git a/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java index fc9548a4d9072..0f3d357269392 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/LineStringBuilder.java @@ -121,7 +121,7 @@ public int numDimensions() { @Override public JtsGeometry buildS4J() { - Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[this.coordinates.size()]); + Coordinate[] coordinates = this.coordinates.toArray(new Coordinate[0]); Geometry geometry; if (wrapdateline) { ArrayList strings = decomposeS4J(FACTORY, coordinates, new ArrayList()); @@ -129,7 +129,7 @@ public JtsGeometry buildS4J() { if (strings.size() == 1) { geometry = strings.get(0); } else { - 
LineString[] linestrings = strings.toArray(new LineString[strings.size()]); + LineString[] linestrings = strings.toArray(new LineString[0]); geometry = FACTORY.createMultiLineString(linestrings); } diff --git a/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java index 64ca4aae326c9..53da2b9d87916 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/MultiLineStringBuilder.java @@ -153,7 +153,7 @@ public JtsGeometry buildS4J() { if (parts.size() == 1) { geometry = parts.get(0); } else { - LineString[] lineStrings = parts.toArray(new LineString[parts.size()]); + LineString[] lineStrings = parts.toArray(new LineString[0]); geometry = FACTORY.createMultiLineString(lineStrings); } } else { diff --git a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java index 0341d2a1af325..9bfb6cac3132e 100644 --- a/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/opensearch/common/geo/builders/PolygonBuilder.java @@ -308,7 +308,7 @@ protected static org.opensearch.geometry.LinearRing linearRing(List } protected static LinearRing linearRingS4J(GeometryFactory factory, List coordinates) { - return factory.createLinearRing(coordinates.toArray(new Coordinate[coordinates.size()])); + return factory.createLinearRing(coordinates.toArray(new Coordinate[0])); } @Override @@ -506,7 +506,7 @@ private static Edge[] edges(Edge[] edges, int numHoles, List> } } - return mainEdges.toArray(new Edge[mainEdges.size()]); + return mainEdges.toArray(new Edge[0]); } private static Coordinate[][][] compose(Edge[] edges, Edge[] holes, int numHoles) { diff --git a/server/src/main/java/org/opensearch/common/inject/PreProcessModule.java b/server/src/main/java/org/opensearch/common/inject/PreProcessModule.java deleted file mode 100644 index e34e564d8e253..0000000000000 --- a/server/src/main/java/org/opensearch/common/inject/PreProcessModule.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.inject; - -/** - * A module can implement this interface to allow to pre process other modules - * before an injector is created. 
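The geo builders above hand coordinate lists to a JTS GeometryFactory, now via the same zero-length-array idiom. A self-contained sketch against the org.locationtech.jts API these builders appear to wrap (assumed dependency; the coordinates are illustrative):

```java
import org.locationtech.jts.geom.Coordinate;
import org.locationtech.jts.geom.GeometryFactory;
import org.locationtech.jts.geom.LineString;
import org.locationtech.jts.geom.MultiLineString;

import java.util.List;

public class JtsSketch {
    private static final GeometryFactory FACTORY = new GeometryFactory();

    public static void main(String[] args) {
        List<Coordinate> coords = List.of(new Coordinate(0, 0), new Coordinate(1, 1));
        // Same shape as the patched builders: list -> array via new T[0].
        LineString line = FACTORY.createLineString(coords.toArray(new Coordinate[0]));
        MultiLineString multi = FACTORY.createMultiLineString(new LineString[] { line });
        System.out.println(multi); // MULTILINESTRING ((0 0, 1 1))
    }
}
```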
- * - * @opensearch.internal - */ -public interface PreProcessModule { - - void processModule(Module module); -} diff --git a/server/src/main/java/org/opensearch/common/inject/internal/ErrorHandler.java b/server/src/main/java/org/opensearch/common/inject/internal/ErrorHandler.java deleted file mode 100644 index 283c8c468401b..0000000000000 --- a/server/src/main/java/org/opensearch/common/inject/internal/ErrorHandler.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Copyright (C) 2006 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.inject.internal; - -import org.opensearch.common.inject.spi.Message; - -/** - * Handles errors in the Injector. - * - * @author crazybob@google.com (Bob Lee) - * - * @opensearch.internal - */ -public interface ErrorHandler { - - /** - * Handles an error. - */ - void handle(Object source, Errors errors); - - /** - * Handles a user-reported error. - */ - void handle(Message message); -} diff --git a/server/src/main/java/org/opensearch/common/inject/internal/UniqueAnnotations.java b/server/src/main/java/org/opensearch/common/inject/internal/UniqueAnnotations.java deleted file mode 100644 index 3c037f7cd552a..0000000000000 --- a/server/src/main/java/org/opensearch/common/inject/internal/UniqueAnnotations.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.inject.internal; - -import org.opensearch.common.inject.BindingAnnotation; - -import java.lang.annotation.Annotation; -import java.lang.annotation.Retention; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -/** - * Unique annotations. 
- * - * @author jessewilson@google.com (Jesse Wilson) - * - * @opensearch.internal - */ -public class UniqueAnnotations { - private UniqueAnnotations() {} - - private static final AtomicInteger nextUniqueValue = new AtomicInteger(1); - - /** - * Returns an annotation instance that is not equal to any other annotation - * instances, for use in creating distinct {@link org.opensearch.common.inject.Key}s. - */ - public static Annotation create() { - return create(nextUniqueValue.getAndIncrement()); - } - - static Annotation create(final int value) { - return new Internal() { - @Override - public int value() { - return value; - } - - @Override - public Class annotationType() { - return Internal.class; - } - - @Override - public String toString() { - return "@" + Internal.class.getName() + "(value=" + value + ")"; - } - - @Override - public boolean equals(Object o) { - return o instanceof Internal && ((Internal) o).value() == value(); - } - - @Override - public int hashCode() { - return (127 * "value".hashCode()) ^ value; - } - }; - } - - @Retention(RUNTIME) - @BindingAnnotation - @interface Internal { - int value(); - } -} diff --git a/server/src/main/java/org/opensearch/common/inject/spi/DefaultBindingTargetVisitor.java b/server/src/main/java/org/opensearch/common/inject/spi/DefaultBindingTargetVisitor.java deleted file mode 100644 index f66199736740f..0000000000000 --- a/server/src/main/java/org/opensearch/common/inject/spi/DefaultBindingTargetVisitor.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Copyright (C) 2008 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.inject.spi; - -import org.opensearch.common.inject.Binding; - -/** - * No-op visitor for subclassing. All interface methods simply delegate to {@link - * #visitOther(Binding)}, returning its result. - * - * @param any type to be returned by the visit method. Use {@link Void} with - * {@code return null} if no return type is needed. - * @author jessewilson@google.com (Jesse Wilson) - * @since 2.0 - * - * @opensearch.internal - */ -public abstract class DefaultBindingTargetVisitor implements BindingTargetVisitor { - - /** - * Default visit implementation. Returns {@code null}. 
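The deleted DefaultBindingTargetVisitor is the standard "visitor with a default" shape: every visit overload funnels into one overridable visitOther, so subclasses override only the cases they care about. A minimal sketch with stand-in types (Shape, Circle, Square are illustrative, not from the source):

```java
interface Shape {}

class Circle implements Shape {}

class Square implements Shape {}

interface ShapeVisitor<V> {
    V visit(Circle circle);

    V visit(Square square);
}

// Every visit overload delegates to a single overridable default.
abstract class DefaultShapeVisitor<V> implements ShapeVisitor<V> {
    protected V visitOther(Shape shape) {
        return null;
    }

    @Override
    public V visit(Circle circle) {
        return visitOther(circle);
    }

    @Override
    public V visit(Square square) {
        return visitOther(square);
    }
}

class NameVisitor extends DefaultShapeVisitor<String> {
    @Override
    public String visit(Circle circle) {
        return "circle"; // only the interesting case is overridden
    }

    public static void main(String[] args) {
        NameVisitor v = new NameVisitor();
        System.out.println(v.visit(new Circle()) + " " + v.visit(new Square())); // circle null
    }
}
```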
- */ - protected V visitOther(Binding binding) { - return null; - } - - @Override - public V visit(InstanceBinding instanceBinding) { - return visitOther(instanceBinding); - } - - @Override - public V visit(ProviderInstanceBinding providerInstanceBinding) { - return visitOther(providerInstanceBinding); - } - - @Override - public V visit(ProviderKeyBinding providerKeyBinding) { - return visitOther(providerKeyBinding); - } - - @Override - public V visit(LinkedKeyBinding linkedKeyBinding) { - return visitOther(linkedKeyBinding); - } - - @Override - public V visit(ExposedBinding exposedBinding) { - return visitOther(exposedBinding); - } - - @Override - public V visit(UntargettedBinding untargettedBinding) { - return visitOther(untargettedBinding); - } - - @Override - public V visit(ConstructorBinding constructorBinding) { - return visitOther(constructorBinding); - } - - @Override - public V visit(ConvertedConstantBinding convertedConstantBinding) { - return visitOther(convertedConstantBinding); - } - - // javac says it's an error to cast ProviderBinding to Binding - @Override - public V visit(ProviderBinding providerBinding) { - return visitOther(providerBinding); - } -} diff --git a/server/src/main/java/org/opensearch/common/logging/HeaderWarning.java b/server/src/main/java/org/opensearch/common/logging/HeaderWarning.java index df5a94ccd34e4..44eefe1e78580 100644 --- a/server/src/main/java/org/opensearch/common/logging/HeaderWarning.java +++ b/server/src/main/java/org/opensearch/common/logging/HeaderWarning.java @@ -60,8 +60,7 @@ public class HeaderWarning { * Regular expression to test if a string matches the RFC7234 specification for warning headers. This pattern assumes that the warn code * is always 299. Further, this pattern assumes that the warn agent represents a version of OpenSearch including the build hash. 
*/ - public static final Pattern WARNING_HEADER_PATTERN = Pattern.compile("299 " + // warn code - "(?:Elasticsearch-|OpenSearch-)" + // warn agent (note: Elasticsearch needed for bwc mixedCluster testing) + public static final Pattern WARNING_HEADER_PATTERN = Pattern.compile("299 OpenSearch-" + // warn code "\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-" + // warn agent "(?:[a-f0-9]{7}(?:[a-f0-9]{33})?|unknown) " + // warn agent "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\"( " + // quoted warning value, captured diff --git a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java index c0405f9e52b77..4438bf53fd62c 100644 --- a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java @@ -66,9 +66,7 @@ import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; -import java.util.Collections; import java.util.EnumSet; -import java.util.HashSet; import java.util.List; import java.util.Objects; import java.util.Set; @@ -182,7 +180,6 @@ private static void configure(final Settings settings, final Path configsPath, f final LoggerContext context = (LoggerContext) LogManager.getContext(false); - final Set locationsWithDeprecatedPatterns = Collections.synchronizedSet(new HashSet<>()); final List configurations = new ArrayList<>(); final PropertiesConfigurationFactory factory = new PropertiesConfigurationFactory(); final Set options = EnumSet.of(FileVisitOption.FOLLOW_LINKS); @@ -206,12 +203,8 @@ public FileVisitResult visitFile(final Path file, final BasicFileAttributes attr // Redirect stdout/stderr to log4j. While we ensure Elasticsearch code does not write to those streams, // third party libraries may do that - System.setOut( - new PrintStream(new LoggingOutputStream(LogManager.getLogger("stdout"), Level.INFO), false, StandardCharsets.UTF_8.name()) - ); - System.setErr( - new PrintStream(new LoggingOutputStream(LogManager.getLogger("stderr"), Level.WARN), false, StandardCharsets.UTF_8.name()) - ); + System.setOut(new PrintStream(new LoggingOutputStream(LogManager.getLogger("stdout"), Level.INFO), false, StandardCharsets.UTF_8)); + System.setErr(new PrintStream(new LoggingOutputStream(LogManager.getLogger("stderr"), Level.WARN), false, StandardCharsets.UTF_8)); } private static void configureStatusLogger() { diff --git a/server/src/main/java/org/opensearch/common/logging/LoggerMessageFormat.java b/server/src/main/java/org/opensearch/common/logging/LoggerMessageFormat.java index ad9981809ae3a..a0dec5b8d0b70 100644 --- a/server/src/main/java/org/opensearch/common/logging/LoggerMessageFormat.java +++ b/server/src/main/java/org/opensearch/common/logging/LoggerMessageFormat.java @@ -79,34 +79,34 @@ public static String format(final String prefix, final String messagePattern, fi return messagePattern; } else { // add the tail string which contains no variables and return // the result. 
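The tightened WARNING_HEADER_PATTERN above drops the Elasticsearch- warn-agent alternative that existed only for backward-compatible mixed-cluster testing. A sketch of the kind of header the new pattern accepts, using a simplified stand-in regex (the real constant also captures an optional trailing date and a stricter quoted-value grammar):

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class WarningHeaderSketch {
    public static void main(String[] args) {
        // Warn code 299, an OpenSearch-only warn agent with version plus
        // 7-character build hash, then the quoted warning value.
        String header = "299 OpenSearch-2.4.0-SNAPSHOT-abc1234 \"deprecated setting\"";

        Pattern p = Pattern.compile("299 OpenSearch-\\d+\\.\\d+\\.\\d+(?:-SNAPSHOT)?-[a-f0-9]{7} \"([^\"]*)\"");
        Matcher m = p.matcher(header);
        if (m.matches()) {
            System.out.println(m.group(1)); // deprecated setting
        }
    }
}
```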
- sbuf.append(messagePattern.substring(i, messagePattern.length())); + sbuf.append(messagePattern.substring(i)); return sbuf.toString(); } } else { if (isEscapedDelimiter(messagePattern, j)) { if (!isDoubleEscaped(messagePattern, j)) { L--; // DELIM_START was escaped, thus should not be incremented - sbuf.append(messagePattern.substring(i, j - 1)); + sbuf.append(messagePattern, i, j - 1); sbuf.append(DELIM_START); i = j + 1; } else { // The escape character preceding the delimiter start is // itself escaped: "abc x:\\{}" // we have to consume one backward slash - sbuf.append(messagePattern.substring(i, j - 1)); - deeplyAppendParameter(sbuf, argArray[L], new HashSet()); + sbuf.append(messagePattern, i, j - 1); + deeplyAppendParameter(sbuf, argArray[L], new HashSet<>()); i = j + 2; } } else { // normal case - sbuf.append(messagePattern.substring(i, j)); - deeplyAppendParameter(sbuf, argArray[L], new HashSet()); + sbuf.append(messagePattern, i, j); + deeplyAppendParameter(sbuf, argArray[L], new HashSet<>()); i = j + 2; } } } // append the characters following the last {} pair. - sbuf.append(messagePattern.substring(i, messagePattern.length())); + sbuf.append(messagePattern.substring(i)); return sbuf.toString(); } @@ -116,19 +116,11 @@ static boolean isEscapedDelimiter(String messagePattern, int delimiterStartIndex return false; } char potentialEscape = messagePattern.charAt(delimiterStartIndex - 1); - if (potentialEscape == ESCAPE_CHAR) { - return true; - } else { - return false; - } + return potentialEscape == ESCAPE_CHAR; } static boolean isDoubleEscaped(String messagePattern, int delimiterStartIndex) { - if (delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR) { - return true; - } else { - return false; - } + return delimiterStartIndex >= 2 && messagePattern.charAt(delimiterStartIndex - 2) == ESCAPE_CHAR; } private static void deeplyAppendParameter(StringBuilder sbuf, Object o, Set seen) { diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 7b69dff020bc4..66a18ee0bddfb 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -32,20 +32,14 @@ package org.opensearch.common.lucene; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceAnalyzer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.document.LatLonDocValuesField; import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.index.BinaryDocValues; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.FieldInfo; -import org.apache.lucene.index.FieldInfos; -import org.apache.lucene.index.Fields; import org.apache.lucene.index.FilterCodecReader; import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.FilterLeafReader; @@ -55,21 +49,12 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.LeafMetaData; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import 
org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.PointValues; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.index.SortedSetDocValues; -import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.Terms; -import org.apache.lucene.index.VectorValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; @@ -142,18 +127,6 @@ public class Lucene { private Lucene() {} - public static Version parseVersion(@Nullable String version, Version defaultVersion, Logger logger) { - if (version == null) { - return defaultVersion; - } - try { - return Version.parse(version); - } catch (ParseException e) { - logger.warn(() -> new ParameterizedMessage("no version match {}, default to {}", version, defaultVersion), e); - return defaultVersion; - } - } - /** * Reads the segments infos, failing if it fails to load */ @@ -697,34 +670,6 @@ public static boolean indexExists(final Directory directory) throws IOException return DirectoryReader.indexExists(directory); } - /** - * Wait for an index to exist for up to {@code timeLimitMillis}. Returns - * true if the index eventually exists, false if not. - * - * Will retry the directory every second for at least {@code timeLimitMillis} - */ - public static boolean waitForIndex(final Directory directory, final long timeLimitMillis) throws IOException { - final long DELAY = 1000; - long waited = 0; - try { - while (true) { - if (waited >= timeLimitMillis) { - break; - } - if (indexExists(directory)) { - return true; - } - Thread.sleep(DELAY); - waited += DELAY; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - return false; - } - // one more try after all retries - return indexExists(directory); - } - - /** - * Returns {@code true} iff the given exception or - * one of its causes is an instance of {@link CorruptIndexException}, @@ -1024,92 +969,4 @@ public static NumericDocValuesField newSoftDeletesField() { return new NumericDocValuesField(SOFT_DELETES_FIELD, 1); } - /** - * Returns an empty leaf reader with the given max docs. The reader will be fully deleted.
- */ - public static LeafReader emptyReader(final int maxDoc) { - return new LeafReader() { - final Bits liveDocs = new Bits.MatchNoBits(maxDoc); - - public Terms terms(String field) { - return null; - } - - public NumericDocValues getNumericDocValues(String field) { - return null; - } - - public BinaryDocValues getBinaryDocValues(String field) { - return null; - } - - public SortedDocValues getSortedDocValues(String field) { - return null; - } - - public SortedNumericDocValues getSortedNumericDocValues(String field) { - return null; - } - - public SortedSetDocValues getSortedSetDocValues(String field) { - return null; - } - - public NumericDocValues getNormValues(String field) { - return null; - } - - public FieldInfos getFieldInfos() { - return new FieldInfos(new FieldInfo[0]); - } - - public Bits getLiveDocs() { - return this.liveDocs; - } - - public PointValues getPointValues(String fieldName) { - return null; - } - - public void checkIntegrity() {} - - public Fields getTermVectors(int docID) { - return null; - } - - public int numDocs() { - return 0; - } - - public int maxDoc() { - return maxDoc; - } - - public void document(int docID, StoredFieldVisitor visitor) {} - - protected void doClose() {} - - public LeafMetaData getMetaData() { - return new LeafMetaData(Version.LATEST.major, Version.LATEST, null); - } - - public CacheHelper getCoreCacheHelper() { - return null; - } - - public CacheHelper getReaderCacheHelper() { - return null; - } - - @Override - public VectorValues getVectorValues(String field) throws IOException { - return null; - } - - @Override - public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { - return null; - } - }; - } } diff --git a/server/src/main/java/org/opensearch/common/lucene/index/FilterableTermsEnum.java b/server/src/main/java/org/opensearch/common/lucene/index/FilterableTermsEnum.java index 61c9e9f54cb51..224c5b600c930 100644 --- a/server/src/main/java/org/opensearch/common/lucene/index/FilterableTermsEnum.java +++ b/server/src/main/java/org/opensearch/common/lucene/index/FilterableTermsEnum.java @@ -140,7 +140,7 @@ protected boolean match(int doc) { } enums.add(new Holder(termsEnum, bits)); } - this.enums = enums.toArray(new Holder[enums.size()]); + this.enums = enums.toArray(new Holder[0]); } @Override diff --git a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java index a893fcecf5b88..6986bd8504f84 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/MultiPhrasePrefixQuery.java @@ -323,10 +323,6 @@ private boolean termArraysEquals(List termArrays1, List termArra return true; } - public String getField() { - return field; - } - @Override public void visit(QueryVisitor visitor) { visitor.visitLeaf(this); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/Queries.java b/server/src/main/java/org/opensearch/common/lucene/search/Queries.java index 8b64a45b9db25..125eab9512be8 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/Queries.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/Queries.java @@ -87,10 +87,6 @@ public static Query newLenientFieldQuery(String field, RuntimeException e) { return Queries.newMatchNoDocsQuery("failed [" + field + "] query, caused by " + message); } - public static Query 
newNestedFilter() { - return not(newNonNestedFilter()); - } - /** * Creates a new non-nested docs query */ diff --git a/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java index 35aab81e94bc4..d7ffa2df943b7 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java @@ -619,7 +619,7 @@ public Query like(int docNum) throws IOException { if (fieldNames == null) { // gather list of valid fields from lucene Collection fields = FieldInfos.getIndexedFields(ir); - fieldNames = fields.toArray(new String[fields.size()]); + fieldNames = fields.toArray(new String[0]); } return createQuery(retrieveTerms(docNum)); diff --git a/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java b/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java index c439b57de41cd..625833618b464 100644 --- a/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java +++ b/server/src/main/java/org/opensearch/common/lucene/search/function/WeightFactorFunction.java @@ -73,10 +73,6 @@ public WeightFactorFunction(float weight) { this(weight, null, null); } - public WeightFactorFunction(float weight, @Nullable String functionName) { - this(weight, null, functionName); - } - @Override public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOException { final LeafScoreFunction leafFunction = scoreFunction.getLeafScoreFunction(ctx); diff --git a/server/src/main/java/org/opensearch/common/network/NetworkService.java b/server/src/main/java/org/opensearch/common/network/NetworkService.java index e50c71af4f483..0fb299ef66e70 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkService.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkService.java @@ -205,7 +205,7 @@ public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOE // 1. single wildcard address, probably set by network.host: expand to all interface addresses. if (addresses.length == 1 && addresses[0].isAnyLocalAddress()) { HashSet all = new HashSet<>(Arrays.asList(NetworkUtils.getAllAddresses())); - addresses = all.toArray(new InetAddress[all.size()]); + addresses = all.toArray(new InetAddress[0]); } // 2. 
try to deal with some (mis)configuration @@ -248,7 +248,7 @@ private InetAddress[] resolveInetAddresses(String hosts[]) throws IOException { for (String host : hosts) { set.addAll(Arrays.asList(resolveInternal(host))); } - return set.toArray(new InetAddress[set.size()]); + return set.toArray(new InetAddress[0]); } /** resolves a single host specification */ diff --git a/server/src/main/java/org/opensearch/common/network/NetworkUtils.java b/server/src/main/java/org/opensearch/common/network/NetworkUtils.java index 8660b876fa187..8816260c5d26f 100644 --- a/server/src/main/java/org/opensearch/common/network/NetworkUtils.java +++ b/server/src/main/java/org/opensearch/common/network/NetworkUtils.java @@ -239,7 +239,7 @@ static InetAddress[] getAddressesForInterface(String name) throws SocketExceptio if (list.isEmpty()) { throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses"); } - return list.toArray(new InetAddress[list.size()]); + return list.toArray(new InetAddress[0]); } /** Returns only the IPV4 addresses in {@code addresses} */ @@ -253,7 +253,7 @@ static InetAddress[] filterIPV4(InetAddress addresses[]) { if (list.isEmpty()) { throw new IllegalArgumentException("No ipv4 addresses found in " + Arrays.toString(addresses)); } - return list.toArray(new InetAddress[list.size()]); + return list.toArray(new InetAddress[0]); } /** Returns only the IPV6 addresses in {@code addresses} */ @@ -267,6 +267,6 @@ static InetAddress[] filterIPV6(InetAddress addresses[]) { if (list.isEmpty()) { throw new IllegalArgumentException("No ipv6 addresses found in " + Arrays.toString(addresses)); } - return list.toArray(new InetAddress[list.size()]); + return list.toArray(new InetAddress[0]); } } diff --git a/server/src/main/java/org/opensearch/common/settings/Settings.java b/server/src/main/java/org/opensearch/common/settings/Settings.java index 725817ef22c6c..5e15e1693c017 100644 --- a/server/src/main/java/org/opensearch/common/settings/Settings.java +++ b/server/src/main/java/org/opensearch/common/settings/Settings.java @@ -1040,7 +1040,7 @@ public Builder put(Settings settings, boolean copySecureSettings) { } private void processLegacyLists(Map map) { - String[] array = map.keySet().toArray(new String[map.size()]); + String[] array = map.keySet().toArray(new String[0]); for (String key : array) { if (key.endsWith(".0")) { // let's only look at the head of the list and convert in order starting there. 
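A note on the toArray change that repeats through this and the neighboring files: passing a zero-length array produces the same result as presizing with list.size(), and on modern JVMs it is reported to be at least as fast, because the runtime allocates a correctly sized array internally. A self-contained illustration (example data is mine):

import java.util.Arrays;
import java.util.List;

public class ToArrayIdiomDemo {
    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("127.0.0.1", "::1");
        String[] presized = hosts.toArray(new String[hosts.size()]); // old style being removed
        String[] zeroSized = hosts.toArray(new String[0]);           // style this patch converges on
        System.out.println(Arrays.equals(presized, zeroSized));      // true
    }
}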
int counter = 0; diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsFilter.java b/server/src/main/java/org/opensearch/common/settings/SettingsFilter.java index 9914674068e66..b12360a75c878 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsFilter.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsFilter.java @@ -117,7 +117,7 @@ private static Settings filterSettings(Iterable patterns, Settings setti } } if (!simpleMatchPatternList.isEmpty()) { - String[] simpleMatchPatterns = simpleMatchPatternList.toArray(new String[simpleMatchPatternList.size()]); + String[] simpleMatchPatterns = simpleMatchPatternList.toArray(new String[0]); builder.keys().removeIf(key -> Regex.simpleMatch(simpleMatchPatterns, key)); } return builder.build(); diff --git a/server/src/main/java/org/opensearch/common/util/CombinedRateLimiter.java b/server/src/main/java/org/opensearch/common/util/CombinedRateLimiter.java deleted file mode 100644 index 451add56b255a..0000000000000 --- a/server/src/main/java/org/opensearch/common/util/CombinedRateLimiter.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.common.util; - -import org.apache.lucene.store.RateLimiter; -import org.opensearch.common.unit.ByteSizeValue; - -import java.util.concurrent.atomic.AtomicLong; - -/** - * A rate limiter designed for multiple concurrent users. 
- * - * @opensearch.internal - */ -public class CombinedRateLimiter { - - // TODO: This rate limiter has some concurrency issues between the two maybePause operations - - private final AtomicLong bytesSinceLastPause = new AtomicLong(); - private final RateLimiter.SimpleRateLimiter rateLimiter; - private volatile boolean rateLimit; - - public CombinedRateLimiter(ByteSizeValue maxBytesPerSec) { - rateLimit = maxBytesPerSec.getBytes() > 0; - rateLimiter = new RateLimiter.SimpleRateLimiter(maxBytesPerSec.getMbFrac()); - } - - public long maybePause(int bytes) { - if (rateLimit) { - long bytesSincePause = bytesSinceLastPause.addAndGet(bytes); - if (bytesSincePause > rateLimiter.getMinPauseCheckBytes()) { - // Time to pause - bytesSinceLastPause.addAndGet(-bytesSincePause); - return Math.max(rateLimiter.pause(bytesSincePause), 0); - } - } - return 0; - } - - public void setMBPerSec(ByteSizeValue maxBytesPerSec) { - rateLimit = maxBytesPerSec.getBytes() > 0; - rateLimiter.setMBPerSec(maxBytesPerSec.getMbFrac()); - } - - public double getMBPerSec() { - return rateLimiter.getMBPerSec(); - } -} diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 7297479776da9..31dd621f678ad 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -37,6 +37,12 @@ public class FeatureFlags { */ public static final String SEARCHABLE_SNAPSHOT = "opensearch.experimental.feature.searchable_snapshot.enabled"; + /** + * Gates the functionality of extensions. + * Once the feature is ready for production release, this feature flag can be removed. + */ + public static final String EXTENSIONS = "opensearch.experimental.feature.extensions.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. * This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java index d3d0f6080e7f6..b4673d9534922 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java @@ -81,7 +81,7 @@ public Pending[] getPending() { List pending = new ArrayList<>(); addPending(new ArrayList<>(current), pending, true); addPending(new ArrayList<>(getQueue()), pending, false); - return pending.toArray(new Pending[pending.size()]); + return pending.toArray(new Pending[0]); } public int getNumberOfPendingTasks() { diff --git a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java b/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java deleted file mode 100644 index 2de585e049f56..0000000000000 --- a/server/src/main/java/org/opensearch/discovery/AckClusterStatePublishResponseHandler.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.discovery; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.cluster.node.DiscoveryNode; - -import java.util.Set; - -/** - * Allows to wait for all nodes to reply to the publish of a new cluster state - * and notifies the {@link org.opensearch.discovery.Discovery.AckListener} - * so that the cluster state update can be acknowledged - * - * @opensearch.internal - */ -public class AckClusterStatePublishResponseHandler extends BlockingClusterStatePublishResponseHandler { - - private static final Logger logger = LogManager.getLogger(AckClusterStatePublishResponseHandler.class); - - private final Discovery.AckListener ackListener; - - /** - * Creates a new AckClusterStatePublishResponseHandler - * @param publishingToNodes the set of nodes to which the cluster state will be published and should respond - * @param ackListener the {@link org.opensearch.discovery.Discovery.AckListener} to notify for each response - * gotten from non cluster-manager nodes - */ - public AckClusterStatePublishResponseHandler(Set publishingToNodes, Discovery.AckListener ackListener) { - // Don't count the cluster-manager as acknowledged, because it's not done yet - // otherwise we might end up with all the nodes but the cluster-manager holding the latest cluster state - super(publishingToNodes); - this.ackListener = ackListener; - } - - @Override - public void onResponse(DiscoveryNode node) { - super.onResponse(node); - onNodeAck(ackListener, node, null); - } - - @Override - public void onFailure(DiscoveryNode node, Exception e) { - try { - super.onFailure(node, e); - } finally { - onNodeAck(ackListener, node, e); - } - } - - private void onNodeAck(final Discovery.AckListener ackListener, DiscoveryNode node, Exception e) { - try { - ackListener.onNodeAck(node, e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.debug(() -> new ParameterizedMessage("error while processing ack for node [{}]", node), inner); - } - } -} diff --git a/server/src/main/java/org/opensearch/env/NodeEnvironment.java b/server/src/main/java/org/opensearch/env/NodeEnvironment.java index 1eb64de2126d6..a29e088b2df8d 100644 --- a/server/src/main/java/org/opensearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/opensearch/env/NodeEnvironment.java @@ -1005,7 +1005,7 @@ public Path[] resolveIndexFolder(String indexFolderName) { paths.add(indexFolder); } } - return paths.toArray(new Path[paths.size()]); + return paths.toArray(new Path[0]); } /** diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java 
b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index 735140ca5dc24..e2666491630b7 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -113,10 +113,7 @@ public void allocateDangled(Collection indices, ActionListener extends Parser> { - private static final int MAX_NUMBER_OF_VALUES_IN_ARRAY_FORMAT = 3; /** * Note that this parser is only used for formatting values. */ @@ -285,27 +281,32 @@ private P process(P in) { @Override public List
<P>
parse(XContentParser parser) throws IOException, ParseException { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { - parser.nextToken(); - if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { - XContentBuilder xContentBuilder = reconstructArrayXContent(parser); - try ( - XContentParser subParser = createParser( - parser.getXContentRegistry(), - parser.getDeprecationHandler(), - xContentBuilder - ); - ) { - return Collections.singletonList(process(objectParser.apply(subParser, pointSupplier.get()))); + XContentParser.Token token = parser.nextToken(); + P point = pointSupplier.get(); + ArrayList
<P>
points = new ArrayList<>(); + if (token == XContentParser.Token.VALUE_NUMBER) { + double x = parser.doubleValue(); + parser.nextToken(); + double y = parser.doubleValue(); + token = parser.nextToken(); + if (token == XContentParser.Token.VALUE_NUMBER) { + GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + } else if (token != XContentParser.Token.END_ARRAY) { + throw new OpenSearchParseException("field type does not accept > 3 dimensions"); } + + point.resetCoords(x, y); + points.add(process(point)); } else { - ArrayList
<P>
points = new ArrayList<>(); - while (parser.currentToken() != XContentParser.Token.END_ARRAY) { - points.add(process(objectParser.apply(parser, pointSupplier.get()))); - parser.nextToken(); + while (token != XContentParser.Token.END_ARRAY) { + points.add(process(objectParser.apply(parser, point))); + point = pointSupplier.get(); + token = parser.nextToken(); } - return points; } + return points; } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { if (nullValue == null) { return null; @@ -317,37 +318,6 @@ public List
<P>
parse(XContentParser parser) throws IOException, ParseException { } } - private XContentParser createParser( - NamedXContentRegistry namedXContentRegistry, - DeprecationHandler deprecationHandler, - XContentBuilder xContentBuilder - ) throws IOException { - XContentParser subParser = xContentBuilder.contentType() - .xContent() - .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); - subParser.nextToken(); - return subParser; - } - - private XContentBuilder reconstructArrayXContent(XContentParser parser) throws IOException { - XContentBuilder builder = XContentFactory.jsonBuilder().startArray(); - int numberOfValuesAdded = 0; - while (parser.currentToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { - throw new OpenSearchParseException("numeric value expected"); - } - builder.value(parser.doubleValue()); - parser.nextToken(); - - // Allows one more value to be added so that the error case can be handled by a parser - if (++numberOfValuesAdded > MAX_NUMBER_OF_VALUES_IN_ARRAY_FORMAT) { - break; - } - } - builder.endArray(); - return builder; - } - @Override public Object format(List
<P>
points, String format) { List result = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java index 24f27139f6f4c..6bd1ba772c723 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ParseContext.java +++ b/server/src/main/java/org/opensearch/index/mapper/ParseContext.java @@ -143,7 +143,7 @@ public IndexableField[] getFields(String name) { f.add(field); } } - return f.toArray(new IndexableField[f.size()]); + return f.toArray(new IndexableField[0]); } public IndexableField getField(String name) { diff --git a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java index d76a2b98b951a..ec16532eb0093 100644 --- a/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/IdsQueryBuilder.java @@ -99,7 +99,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } - out.writeStringArray(ids.toArray(new String[ids.size()])); + out.writeStringArray(ids.toArray(new String[0])); } /** diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 71275e53fce95..32309eac90c5c 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -380,7 +380,7 @@ public static Item parse(XContentParser parser, Item item) throws IOException { while (parser.nextToken() != XContentParser.Token.END_ARRAY) { fields.add(parser.text()); } - item.fields(fields.toArray(new String[fields.size()])); + item.fields(fields.toArray(new String[0])); } else { throw new OpenSearchParseException("failed to parse More Like This item. field [fields] must be an array"); } @@ -680,7 +680,7 @@ public MoreLikeThisQueryBuilder stopWords(List stopWords) { if (stopWords == null) { throw new IllegalArgumentException("requires stopwords to be non-null"); } - this.stopWords = stopWords.toArray(new String[stopWords.size()]); + this.stopWords = stopWords.toArray(new String[0]); return this; } @@ -889,11 +889,11 @@ public static MoreLikeThisQueryBuilder fromXContent(XContentParser parser) throw throw new ParsingException(parser.getTokenLocation(), "more_like_this requires 'fields' to be non-empty"); } - String[] fieldsArray = fields == null ? null : fields.toArray(new String[fields.size()]); - String[] likeTextsArray = likeTexts.isEmpty() ? null : likeTexts.toArray(new String[likeTexts.size()]); - String[] unlikeTextsArray = unlikeTexts.isEmpty() ? null : unlikeTexts.toArray(new String[unlikeTexts.size()]); - Item[] likeItemsArray = likeItems.isEmpty() ? null : likeItems.toArray(new Item[likeItems.size()]); - Item[] unlikeItemsArray = unlikeItems.isEmpty() ? null : unlikeItems.toArray(new Item[unlikeItems.size()]); + String[] fieldsArray = fields == null ? null : fields.toArray(new String[0]); + String[] likeTextsArray = likeTexts.isEmpty() ? null : likeTexts.toArray(new String[0]); + String[] unlikeTextsArray = unlikeTexts.isEmpty() ? null : unlikeTexts.toArray(new String[0]); + Item[] likeItemsArray = likeItems.isEmpty() ? null : likeItems.toArray(new Item[0]); + Item[] unlikeItemsArray = unlikeItems.isEmpty() ? 
null : unlikeItems.toArray(new Item[0]); MoreLikeThisQueryBuilder moreLikeThisQueryBuilder = new MoreLikeThisQueryBuilder(fieldsArray, likeTextsArray, likeItemsArray) .unlike(unlikeTextsArray) @@ -1017,7 +1017,7 @@ protected Query doToQuery(QueryShardContext context) throws IOException { if (moreLikeFields.isEmpty()) { return null; } - mltQuery.setMoreLikeFields(moreLikeFields.toArray(new String[moreLikeFields.size()])); + mltQuery.setMoreLikeFields(moreLikeFields.toArray(new String[0])); // handle like texts if (likeTexts.length > 0) { @@ -1090,7 +1090,7 @@ private static void setDefaultIndexTypeFields( if (useDefaultField) { item.fields("*"); } else { - item.fields(moreLikeFields.toArray(new String[moreLikeFields.size()])); + item.fields(moreLikeFields.toArray(new String[0])); } } } diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index d50585ae0aebf..d3e092cd1cab3 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -81,11 +81,7 @@ private Query getVectorQueryFromShape(Geometry queryShape, String fieldName, Sha if (geometries.size() == 0) { return new MatchNoDocsQuery(); } - return LatLonShape.newGeometryQuery( - fieldName, - relation.getLuceneRelation(), - geometries.toArray(new LatLonGeometry[geometries.size()]) - ); + return LatLonShape.newGeometryQuery(fieldName, relation.getLuceneRelation(), geometries.toArray(new LatLonGeometry[0])); } /** diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java index 19edcfc2b0b48..26be6fae56929 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -602,7 +602,7 @@ public static FunctionScoreQueryBuilder fromXContent(XContentParser parser) thro FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder( query, - filterFunctionBuilders.toArray(new FunctionScoreQueryBuilder.FilterFunctionBuilder[filterFunctionBuilders.size()]) + filterFunctionBuilders.toArray(new FilterFunctionBuilder[0]) ); if (combineFunction != null) { functionScoreQueryBuilder.boostMode(combineFunction); diff --git a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java index b444757b85951..bd8398eab91c1 100644 --- a/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java +++ b/server/src/main/java/org/opensearch/index/reindex/ReindexRequest.java @@ -400,7 +400,7 @@ private static String[] extractStringArray(Map source, String na if (value instanceof List) { @SuppressWarnings("unchecked") List list = (List) value; - return list.toArray(new String[list.size()]); + return list.toArray(new String[0]); } else if (value instanceof String) { return Strings.splitStringByCommaToArray((String) value); } else { diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 4c17db2044e15..701dec069d946 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -902,7 +902,7 @@ private boolean invariant() { if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { - if (checkpoints.get(shardRouting.allocationId().getId()).tracked) { + if (checkpoints.get(shardRouting.allocationId().getId()).tracked && !indexSettings().isRemoteTranslogStoreEnabled()) { assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( diff --git a/server/src/main/java/org/opensearch/index/shard/LocalShardSnapshot.java b/server/src/main/java/org/opensearch/index/shard/LocalShardSnapshot.java index dc02cc054118c..82ec4c72e3d0c 100644 --- a/server/src/main/java/org/opensearch/index/shard/LocalShardSnapshot.java +++ b/server/src/main/java/org/opensearch/index/shard/LocalShardSnapshot.java @@ -96,7 +96,7 @@ Directory getSnapshotDirectory() { @Override public String[] listAll() throws IOException { Collection<String> fileNames = wrappedIndexCommit.get().getFileNames(); - final String[] fileNameArray = fileNames.toArray(new String[fileNames.size()]); + final String[] fileNameArray = fileNames.toArray(new String[0]); return fileNameArray; } diff --git a/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java new file mode 100644 index 0000000000000..9ffe61208b78c --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandler.java @@ -0,0 +1,231 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.recovery; + +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.ThreadedActionListener; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.cluster.routing.IndexShardRoutingTable; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.engine.RecoveryEngineException; +import org.opensearch.index.seqno.ReplicationTracker; +import org.opensearch.index.seqno.RetentionLease; +import org.opensearch.index.seqno.RetentionLeaseNotFoundException; +import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.translog.Translog; +import org.opensearch.indices.RunUnderPrimaryPermit; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transports; + +import java.io.Closeable; +import java.io.IOException; +import java.util.function.Consumer; + +/** + * This handler is used for node-to-node peer recovery when the recovery target is a replica or a relocating primary + * shard with translog backed by local store. 
+ * + * @opensearch.internal + */ +public class LocalStorePeerRecoverySourceHandler extends RecoverySourceHandler { + + public LocalStorePeerRecoverySourceHandler( + IndexShard shard, + RecoveryTargetHandler recoveryTarget, + ThreadPool threadPool, + StartRecoveryRequest request, + int fileChunkSizeInBytes, + int maxConcurrentFileChunks, + int maxConcurrentOperations + ) { + super(shard, recoveryTarget, threadPool, request, fileChunkSizeInBytes, maxConcurrentFileChunks, maxConcurrentOperations); + } + + @Override + protected void innerRecoveryToTarget(ActionListener listener, Consumer onFailure) throws IOException { + final SetOnce retentionLeaseRef = new SetOnce<>(); + + RunUnderPrimaryPermit.run(() -> { + final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); + ShardRouting targetShardRouting = routingTable.getByAllocationId(request.targetAllocationId()); + if (targetShardRouting == null) { + logger.debug( + "delaying recovery of {} as it is not listed as assigned to target node {}", + request.shardId(), + request.targetNode() + ); + throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); + } + assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; + retentionLeaseRef.set(shard.getRetentionLeases().get(ReplicationTracker.getPeerRecoveryRetentionLeaseId(targetShardRouting))); + }, shardId + " validating recovery target [" + request.targetAllocationId() + "] registered ", shard, cancellableThreads, logger); + final Closeable retentionLock = shard.acquireHistoryRetentionLock(); + resources.add(retentionLock); + final long startingSeqNo; + final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO + && isTargetSameHistory() + && shard.hasCompleteHistoryOperations(PEER_RECOVERY_NAME, request.startingSeqNo()) + && ((retentionLeaseRef.get() == null && shard.useRetentionLeasesInPeerRecovery() == false) + || (retentionLeaseRef.get() != null && retentionLeaseRef.get().retainingSequenceNumber() <= request.startingSeqNo())); + // NB check hasCompleteHistoryOperations when computing isSequenceNumberBasedRecovery, even if there is a retention lease, + // because when doing a rolling upgrade from earlier than 7.4 we may create some leases that are initially unsatisfied. It's + // possible there are other cases where we cannot satisfy all leases, because that's not a property we currently expect to hold. + // Also it's pretty cheap when soft deletes are enabled, and it'd be a disaster if we tried a sequence-number-based recovery + // without having a complete history. + + if (isSequenceNumberBasedRecovery && retentionLeaseRef.get() != null) { + // all the history we need is retained by an existing retention lease, so we do not need a separate retention lock + retentionLock.close(); + logger.trace("history is retained by {}", retentionLeaseRef.get()); + } else { + // all the history we need is retained by the retention lock, obtained before calling shard.hasCompleteHistoryOperations() + // and before acquiring the safe commit we'll be using, so we can be certain that all operations after the safe commit's + // local checkpoint will be retained for the duration of this recovery. 
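To unpack the isSequenceNumberBasedRecovery expression computed above, here is a toy restatement of the same predicate (method and parameter names are mine; the -2 sentinel mirrors SequenceNumbers.UNASSIGNED_SEQ_NO as I read the codebase):

public class SeqNoRecoveryCheck {
    static final long UNASSIGNED_SEQ_NO = -2; // illustrative stand-in for SequenceNumbers.UNASSIGNED_SEQ_NO

    static boolean seqNoBased(long requestedStartingSeqNo, boolean targetSameHistory, boolean hasCompleteHistory,
                              Long leaseRetainingSeqNo, boolean useRetentionLeases) {
        return requestedStartingSeqNo != UNASSIGNED_SEQ_NO
            && targetSameHistory
            && hasCompleteHistory
            // either no lease exists and leases are not in use, or the lease retains everything we need
            && ((leaseRetainingSeqNo == null && !useRetentionLeases)
                || (leaseRetainingSeqNo != null && leaseRetainingSeqNo <= requestedStartingSeqNo));
    }

    public static void main(String[] args) {
        System.out.println(seqNoBased(42, true, true, 10L, true));                // true: lease covers seq-no 42
        System.out.println(seqNoBased(UNASSIGNED_SEQ_NO, true, true, 10L, true)); // false: no starting seq-no requested
    }
}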
+ logger.trace("history is retained by retention lock"); + } + + final StepListener sendFileStep = new StepListener<>(); + final StepListener prepareEngineStep = new StepListener<>(); + final StepListener sendSnapshotStep = new StepListener<>(); + final StepListener finalizeStep = new StepListener<>(); + + if (isSequenceNumberBasedRecovery) { + logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo()); + startingSeqNo = request.startingSeqNo(); + if (retentionLeaseRef.get() == null) { + createRetentionLease(startingSeqNo, ActionListener.map(sendFileStep, ignored -> SendFileResult.EMPTY)); + } else { + sendFileStep.onResponse(SendFileResult.EMPTY); + } + } else { + final GatedCloseable wrappedSafeCommit; + try { + wrappedSafeCommit = acquireSafeCommit(shard); + resources.add(wrappedSafeCommit); + } catch (final Exception e) { + throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e); + } + + // Try and copy enough operations to the recovering peer so that if it is promoted to primary then it has a chance of being + // able to recover other replicas using operations-based recoveries. If we are not using retention leases then we + // conservatively copy all available operations. If we are using retention leases then "enough operations" is just the + // operations from the local checkpoint of the safe commit onwards, because when using soft deletes the safe commit retains + // at least as much history as anything else. The safe commit will often contain all the history retained by the current set + // of retention leases, but this is not guaranteed: an earlier peer recovery from a different primary might have created a + // retention lease for some history that this primary already discarded, since we discard history when the global checkpoint + // advances and not when creating a new safe commit. In any case this is a best-effort thing since future recoveries can + // always fall back to file-based ones, and only really presents a problem if this primary fails before things have settled + // down. + startingSeqNo = Long.parseLong(wrappedSafeCommit.get().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1L; + logger.trace("performing file-based recovery followed by history replay starting at [{}]", startingSeqNo); + + try { + final int estimateNumOps = countNumberOfHistoryOperations(startingSeqNo); + final Releasable releaseStore = acquireStore(shard.store()); + resources.add(releaseStore); + onSendFileStepComplete(sendFileStep, wrappedSafeCommit, releaseStore); + + final StepListener deleteRetentionLeaseStep = new StepListener<>(); + RunUnderPrimaryPermit.run(() -> { + try { + // If the target previously had a copy of this shard then a file-based recovery might move its global + // checkpoint backwards. We must therefore remove any existing retention lease so that we can create a + // new one later on in the recovery. 
+ shard.removePeerRecoveryRetentionLease( + request.targetNode().getId(), + new ThreadedActionListener<>( + logger, + shard.getThreadPool(), + ThreadPool.Names.GENERIC, + deleteRetentionLeaseStep, + false + ) + ); + } catch (RetentionLeaseNotFoundException e) { + logger.debug("no peer-recovery retention lease for " + request.targetAllocationId()); + deleteRetentionLeaseStep.onResponse(null); + } + }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger); + + deleteRetentionLeaseStep.whenComplete(ignored -> { + assert Transports.assertNotTransportThread(this + "[phase1]"); + phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep, false); + }, onFailure); + + } catch (final Exception e) { + throw new RecoveryEngineException(shard.shardId(), 1, "sendFileStep failed", e); + } + } + assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo; + + sendFileStep.whenComplete(r -> { + assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]"); + // For a sequence based recovery, the target can keep its local translog + prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep); + }, onFailure); + + prepareEngineStep.whenComplete(prepareEngineTime -> { + assert Transports.assertNotTransportThread(this + "[phase2]"); + /* + * add shard to replication group (shard will receive replication requests from this point on) now that engine is open. + * This means that any document indexed into the primary after this will be replicated to this replica as well + * make sure to do this before sampling the max sequence number in the next step, to ensure that we send + * all documents up to maxSeqNo in phase2. + */ + RunUnderPrimaryPermit.run( + () -> shard.initiateTracking(request.targetAllocationId()), + shardId + " initiating tracking of " + request.targetAllocationId(), + shard, + cancellableThreads, + logger + ); + + final long endingSeqNo = shard.seqNoStats().getMaxSeqNo(); + if (logger.isTraceEnabled()) { + logger.trace("snapshot translog for recovery; current size is [{}]", countNumberOfHistoryOperations(startingSeqNo)); + } + final Translog.Snapshot phase2Snapshot = shard.newChangesSnapshot( + PEER_RECOVERY_NAME, + startingSeqNo, + Long.MAX_VALUE, + false, + true + ); + resources.add(phase2Snapshot); + retentionLock.close(); + + // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values + // are at least as high as the corresponding values on the primary when any of these operations were executed on it. 
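Zooming out briefly: this whole method is organized as a chain of StepListeners (sendFileStep, prepareEngineStep, sendSnapshotStep, then a finalize step), where each whenComplete wires the next stage and every stage shares one onFailure sink. A toy model of that control flow (my own minimal stand-in, not the real org.opensearch.action.StepListener):

import java.util.function.Consumer;

class Step<T> {
    private Consumer<T> next = r -> {};
    private Consumer<Exception> failure = e -> {};

    void whenComplete(Consumer<T> onResponse, Consumer<Exception> onFailure) {
        this.next = onResponse;
        this.failure = onFailure;
    }

    void onResponse(T r) { next.accept(r); }
    void onFailure(Exception e) { failure.accept(e); }
}

public class RecoveryStepChainDemo {
    public static void main(String[] args) {
        Consumer<Exception> onFailure = e -> System.err.println("recovery failed: " + e);
        Step<String> sendFileStep = new Step<>();
        Step<String> prepareEngineStep = new Step<>();
        Step<String> sendSnapshotStep = new Step<>();

        sendFileStep.whenComplete(r -> prepareEngineStep.onResponse("engine prepared after " + r), onFailure);
        prepareEngineStep.whenComplete(r -> sendSnapshotStep.onResponse("ops replayed after " + r), onFailure);
        sendSnapshotStep.whenComplete(r -> System.out.println("finalize: " + r), onFailure);

        sendFileStep.onResponse("file copy"); // kicks off the chain
    }
}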
+ final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp(); + final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes(); + final RetentionLeases retentionLeases = shard.getRetentionLeases(); + final long mappingVersionOnPrimary = shard.indexSettings().getIndexMetadata().getMappingVersion(); + phase2( + startingSeqNo, + endingSeqNo, + phase2Snapshot, + maxSeenAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, + retentionLeases, + mappingVersionOnPrimary, + sendSnapshotStep + ); + + }, onFailure); + finalizeStepAndCompleteFuture(startingSeqNo, sendSnapshotStep, sendFileStep, prepareEngineStep, onFailure); + } +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java index a1cf78920cf7e..8bea14a1a1c86 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoverySourceService.java @@ -378,15 +378,7 @@ private Tuple createRecovery recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime) ); - handler = new RecoverySourceHandler( - shard, - recoveryTarget, - shard.getThreadPool(), - request, - Math.toIntExact(recoverySettings.getChunkSize().getBytes()), - recoverySettings.getMaxConcurrentFileChunks(), - recoverySettings.getMaxConcurrentOperations() - ); + handler = RecoverySourceHandlerFactory.create(shard, recoveryTarget, request, recoverySettings); return Tuple.tuple(handler, recoveryTarget); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 505d3c7adfb3f..03d1066ae5a60 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -39,15 +39,12 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; -import org.apache.lucene.util.SetOnce; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.ThreadedActionListener; import org.opensearch.action.support.replication.ReplicationResponse; -import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.CheckedRunnable; import org.opensearch.common.StopWatch; import org.opensearch.common.concurrent.GatedCloseable; @@ -62,7 +59,6 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.internal.io.IOUtils; import org.opensearch.index.engine.RecoveryEngineException; -import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLease; import org.opensearch.index.seqno.RetentionLeaseNotFoundException; import org.opensearch.index.seqno.RetentionLeases; @@ -97,7 +93,7 @@ * RecoverySourceHandler handles the three phases of shard recovery, which is * everything relating to copying the segment files as well as sending translog * operations across the wire once the segments have been copied. - * + *
<p>
* Note: There is always one source handler per recovery that handles all the * file and translog transfer. This handler is completely isolated from other recoveries * while the {@link RateLimiter} passed via {@link RecoverySettings} is shared across recoveries @@ -106,25 +102,25 @@ * * @opensearch.internal */ -public class RecoverySourceHandler { +public abstract class RecoverySourceHandler { protected final Logger logger; // Shard that is going to be recovered (the "source") - private final IndexShard shard; - private final int shardId; + protected final IndexShard shard; + protected final int shardId; // Request containing source and target node information - private final StartRecoveryRequest request; + protected final StartRecoveryRequest request; private final int chunkSizeInBytes; private final RecoveryTargetHandler recoveryTarget; private final int maxConcurrentOperations; private final ThreadPool threadPool; - private final CancellableThreads cancellableThreads = new CancellableThreads(); - private final List resources = new CopyOnWriteArrayList<>(); - private final ListenableFuture future = new ListenableFuture<>(); + protected final CancellableThreads cancellableThreads = new CancellableThreads(); + protected final List resources = new CopyOnWriteArrayList<>(); + protected final ListenableFuture future = new ListenableFuture<>(); public static final String PEER_RECOVERY_NAME = "peer-recovery"; private final SegmentFileTransferHandler transferHandler; - public RecoverySourceHandler( + RecoverySourceHandler( IndexShard shard, RecoveryTargetHandler recoveryTarget, ThreadPool threadPool, @@ -183,251 +179,70 @@ public void recoverToTarget(ActionListener listener) { throw e; }); final Consumer onFailure = e -> { - assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[onFailure]"); + assert Transports.assertNotTransportThread(this + "[onFailure]"); IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); }; + innerRecoveryToTarget(listener, onFailure); + } catch (Exception e) { + IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e)); + } + } - final SetOnce retentionLeaseRef = new SetOnce<>(); + protected abstract void innerRecoveryToTarget(ActionListener listener, Consumer onFailure) + throws IOException; - RunUnderPrimaryPermit.run(() -> { - final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable(); - ShardRouting targetShardRouting = routingTable.getByAllocationId(request.targetAllocationId()); - if (targetShardRouting == null) { - logger.debug( - "delaying recovery of {} as it is not listed as assigned to target node {}", - request.shardId(), - request.targetNode() - ); - throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node"); - } - assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting; - retentionLeaseRef.set( - shard.getRetentionLeases().get(ReplicationTracker.getPeerRecoveryRetentionLeaseId(targetShardRouting)) - ); - }, - shardId + " validating recovery target [" + request.targetAllocationId() + "] registered ", - shard, - cancellableThreads, - logger + protected void finalizeStepAndCompleteFuture( + long startingSeqNo, + StepListener sendSnapshotStep, + StepListener sendFileStep, + StepListener prepareEngineStep, + Consumer onFailure + ) { + final StepListener finalizeStep = new StepListener<>(); + // Recovery target can trim all operations >= 
-        RunUnderPrimaryPermit.run(() -> {
-            final IndexShardRoutingTable routingTable = shard.getReplicationGroup().getRoutingTable();
-            ShardRouting targetShardRouting = routingTable.getByAllocationId(request.targetAllocationId());
-            if (targetShardRouting == null) {
-                logger.debug(
-                    "delaying recovery of {} as it is not listed as assigned to target node {}",
-                    request.shardId(),
-                    request.targetNode()
-                );
-                throw new DelayRecoveryException("source node does not have the shard listed in its state as allocated on the node");
-            }
-            assert targetShardRouting.initializing() : "expected recovery target to be initializing but was " + targetShardRouting;
-            retentionLeaseRef.set(
-                shard.getRetentionLeases().get(ReplicationTracker.getPeerRecoveryRetentionLeaseId(targetShardRouting))
-            );
-        },
-            shardId + " validating recovery target [" + request.targetAllocationId() + "] registered ",
-            shard,
-            cancellableThreads,
-            logger
-        );
+    protected void finalizeStepAndCompleteFuture(
+        long startingSeqNo,
+        StepListener<SendSnapshotResult> sendSnapshotStep,
+        StepListener<SendFileResult> sendFileStep,
+        StepListener<TimeValue> prepareEngineStep,
+        Consumer<Exception> onFailure
+    ) {
+        final StepListener<Void> finalizeStep = new StepListener<>();
+        // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2
+        final long trimAboveSeqNo = startingSeqNo - 1;
+        sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure);
+
+        finalizeStep.whenComplete(r -> {
+            final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time
+            final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result();
+            final SendFileResult sendFileResult = sendFileStep.result();
+            final RecoveryResponse response = new RecoveryResponse(
+                sendFileResult.phase1FileNames,
+                sendFileResult.phase1FileSizes,
+                sendFileResult.phase1ExistingFileNames,
+                sendFileResult.phase1ExistingFileSizes,
+                sendFileResult.totalSize,
+                sendFileResult.existingTotalSize,
+                sendFileResult.took.millis(),
+                phase1ThrottlingWaitTime,
+                prepareEngineStep.result().millis(),
+                sendSnapshotResult.sentOperations,
+                sendSnapshotResult.tookTime.millis()
+            );
-        final Closeable retentionLock = shard.acquireHistoryRetentionLock();
-        resources.add(retentionLock);
-        final long startingSeqNo;
-        final boolean isSequenceNumberBasedRecovery = request.startingSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO
-            && isTargetSameHistory()
-            && shard.hasCompleteHistoryOperations(PEER_RECOVERY_NAME, request.startingSeqNo())
-            && ((retentionLeaseRef.get() == null && shard.useRetentionLeasesInPeerRecovery() == false)
-                || (retentionLeaseRef.get() != null && retentionLeaseRef.get().retainingSequenceNumber() <= request.startingSeqNo()));
-        // NB check hasCompleteHistoryOperations when computing isSequenceNumberBasedRecovery, even if there is a retention lease,
-        // because when doing a rolling upgrade from earlier than 7.4 we may create some leases that are initially unsatisfied. It's
-        // possible there are other cases where we cannot satisfy all leases, because that's not a property we currently expect to hold.
-        // Also it's pretty cheap when soft deletes are enabled, and it'd be a disaster if we tried a sequence-number-based recovery
-        // without having a complete history.
-
-        if (isSequenceNumberBasedRecovery && retentionLeaseRef.get() != null) {
-            // all the history we need is retained by an existing retention lease, so we do not need a separate retention lock
-            retentionLock.close();
-            logger.trace("history is retained by {}", retentionLeaseRef.get());
-        } else {
-            // all the history we need is retained by the retention lock, obtained before calling shard.hasCompleteHistoryOperations()
-            // and before acquiring the safe commit we'll be using, so we can be certain that all operations after the safe commit's
-            // local checkpoint will be retained for the duration of this recovery.
-            logger.trace("history is retained by retention lock");
-        }
-
-        final StepListener<SendFileResult> sendFileStep = new StepListener<>();
-        final StepListener<TimeValue> prepareEngineStep = new StepListener<>();
-        final StepListener<SendSnapshotResult> sendSnapshotStep = new StepListener<>();
-        final StepListener<Void> finalizeStep = new StepListener<>();
-
-        if (isSequenceNumberBasedRecovery) {
-            logger.trace("performing sequence numbers based recovery. starting at [{}]", request.startingSeqNo());
-            startingSeqNo = request.startingSeqNo();
-            if (retentionLeaseRef.get() == null) {
-                createRetentionLease(startingSeqNo, ActionListener.map(sendFileStep, ignored -> SendFileResult.EMPTY));
-            } else {
-                sendFileStep.onResponse(SendFileResult.EMPTY);
-            }
-        } else {
-            final GatedCloseable<IndexCommit> wrappedSafeCommit;
-            try {
-                wrappedSafeCommit = acquireSafeCommit(shard);
-                resources.add(wrappedSafeCommit);
-            } catch (final Exception e) {
-                throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e);
-            }
-
-            // Try and copy enough operations to the recovering peer so that if it is promoted to primary then it has a chance of being
-            // able to recover other replicas using operations-based recoveries. If we are not using retention leases then we
-            // conservatively copy all available operations. If we are using retention leases then "enough operations" is just the
-            // operations from the local checkpoint of the safe commit onwards, because when using soft deletes the safe commit retains
-            // at least as much history as anything else. The safe commit will often contain all the history retained by the current set
-            // of retention leases, but this is not guaranteed: an earlier peer recovery from a different primary might have created a
-            // retention lease for some history that this primary already discarded, since we discard history when the global checkpoint
-            // advances and not when creating a new safe commit. In any case this is a best-effort thing since future recoveries can
-            // always fall back to file-based ones, and only really presents a problem if this primary fails before things have settled
-            // down.
-            startingSeqNo = Long.parseLong(wrappedSafeCommit.get().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1L;
-            logger.trace("performing file-based recovery followed by history replay starting at [{}]", startingSeqNo);
-
-            try {
-                final int estimateNumOps = countNumberOfHistoryOperations(startingSeqNo);
-                final Releasable releaseStore = acquireStore(shard.store());
-                resources.add(releaseStore);
-                sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> {
-                    try {
-                        IOUtils.close(wrappedSafeCommit, releaseStore);
-                    } catch (final IOException ex) {
-                        logger.warn("releasing snapshot caused exception", ex);
-                    }
-                });
-
-                final StepListener<ReplicationResponse> deleteRetentionLeaseStep = new StepListener<>();
-                RunUnderPrimaryPermit.run(() -> {
-                    try {
-                        // If the target previously had a copy of this shard then a file-based recovery might move its global
-                        // checkpoint backwards. We must therefore remove any existing retention lease so that we can create a
-                        // new one later on in the recovery.
-                        shard.removePeerRecoveryRetentionLease(
-                            request.targetNode().getId(),
-                            new ThreadedActionListener<>(
-                                logger,
-                                shard.getThreadPool(),
-                                ThreadPool.Names.GENERIC,
-                                deleteRetentionLeaseStep,
-                                false
-                            )
-                        );
-                    } catch (RetentionLeaseNotFoundException e) {
-                        logger.debug("no peer-recovery retention lease for " + request.targetAllocationId());
-                        deleteRetentionLeaseStep.onResponse(null);
-                    }
-                }, shardId + " removing retention lease for [" + request.targetAllocationId() + "]", shard, cancellableThreads, logger);
-
-                deleteRetentionLeaseStep.whenComplete(ignored -> {
-                    assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase1]");
-                    phase1(wrappedSafeCommit.get(), startingSeqNo, () -> estimateNumOps, sendFileStep);
-                }, onFailure);
-
-            } catch (final Exception e) {
-                throw new RecoveryEngineException(shard.shardId(), 1, "sendFileStep failed", e);
-            }
+            try {
+                future.onResponse(response);
+            } finally {
+                IOUtils.close(resources);
+            }
-        assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo;
-
-        boolean isRecoveringReplicaWithRemoteTxLogEnabledIndex = request.isPrimaryRelocation() == false
-            && shard.isRemoteTranslogEnabled();
-
-        if (isRecoveringReplicaWithRemoteTxLogEnabledIndex) {
-            sendFileStep.whenComplete(r -> {
-                assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]");
-                // For a sequence based recovery, the target can keep its local translog
-                prepareTargetForTranslog(0, prepareEngineStep);
-            }, onFailure);
-
-            prepareEngineStep.whenComplete(prepareEngineTime -> {
-                assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase2]");
-                RunUnderPrimaryPermit.run(
-                    () -> shard.initiateTracking(request.targetAllocationId()),
-                    shardId + " initiating tracking of " + request.targetAllocationId(),
-                    shard,
-                    cancellableThreads,
-                    logger
-                );
-                final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
-                retentionLock.close();
-                sendSnapshotStep.onResponse(new SendSnapshotResult(endingSeqNo, 0, TimeValue.ZERO));
-            }, onFailure);
-        } else {
-            sendFileStep.whenComplete(r -> {
-                assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[prepareTargetForTranslog]");
-                // For a sequence based recovery, the target can keep its local translog
-                prepareTargetForTranslog(countNumberOfHistoryOperations(startingSeqNo), prepareEngineStep);
-            }, onFailure);
-
-            prepareEngineStep.whenComplete(prepareEngineTime -> {
-                assert Transports.assertNotTransportThread(RecoverySourceHandler.this + "[phase2]");
-                /*
-                 * add shard to replication group (shard will receive replication requests from this point on) now that engine is open.
-                 * This means that any document indexed into the primary after this will be replicated to this replica as well
-                 * make sure to do this before sampling the max sequence number in the next step, to ensure that we send
-                 * all documents up to maxSeqNo in phase2.
-                 */
-                RunUnderPrimaryPermit.run(
-                    () -> shard.initiateTracking(request.targetAllocationId()),
-                    shardId + " initiating tracking of " + request.targetAllocationId(),
-                    shard,
-                    cancellableThreads,
-                    logger
-                );
-
-                final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
-                if (logger.isTraceEnabled()) {
-                    logger.trace("snapshot translog for recovery; current size is [{}]", countNumberOfHistoryOperations(startingSeqNo));
-                }
-                final Translog.Snapshot phase2Snapshot = shard.newChangesSnapshot(
-                    PEER_RECOVERY_NAME,
-                    startingSeqNo,
-                    Long.MAX_VALUE,
-                    false,
-                    true
-                );
-                resources.add(phase2Snapshot);
-                retentionLock.close();
-
-                // we have to capture the max_seen_auto_id_timestamp and the max_seq_no_of_updates to make sure that these values
-                // are at least as high as the corresponding values on the primary when any of these operations were executed on it.
-                final long maxSeenAutoIdTimestamp = shard.getMaxSeenAutoIdTimestamp();
-                final long maxSeqNoOfUpdatesOrDeletes = shard.getMaxSeqNoOfUpdatesOrDeletes();
-                final RetentionLeases retentionLeases = shard.getRetentionLeases();
-                final long mappingVersionOnPrimary = shard.indexSettings().getIndexMetadata().getMappingVersion();
-                phase2(
-                    startingSeqNo,
-                    endingSeqNo,
-                    phase2Snapshot,
-                    maxSeenAutoIdTimestamp,
-                    maxSeqNoOfUpdatesOrDeletes,
-                    retentionLeases,
-                    mappingVersionOnPrimary,
-                    sendSnapshotStep
-                );
+        }, onFailure);
+    }

-            }, onFailure);
+    protected void onSendFileStepComplete(
+        StepListener<SendFileResult> sendFileStep,
+        GatedCloseable<IndexCommit> wrappedSafeCommit,
+        Releasable releaseStore
+    ) {
+        sendFileStep.whenComplete(r -> IOUtils.close(wrappedSafeCommit, releaseStore), e -> {
+            try {
+                IOUtils.close(wrappedSafeCommit, releaseStore);
+            } catch (final IOException ex) {
+                logger.warn("releasing snapshot caused exception", ex);
            }
-
-        // Recovery target can trim all operations >= startingSeqNo as we have sent all these operations in the phase 2
-        final long trimAboveSeqNo = startingSeqNo - 1;
-        sendSnapshotStep.whenComplete(r -> finalizeRecovery(r.targetLocalCheckpoint, trimAboveSeqNo, finalizeStep), onFailure);
-
-        finalizeStep.whenComplete(r -> {
-            final long phase1ThrottlingWaitTime = 0L; // TODO: return the actual throttle time
-            final SendSnapshotResult sendSnapshotResult = sendSnapshotStep.result();
-            final SendFileResult sendFileResult = sendFileStep.result();
-            final RecoveryResponse response = new RecoveryResponse(
-                sendFileResult.phase1FileNames,
-                sendFileResult.phase1FileSizes,
-                sendFileResult.phase1ExistingFileNames,
-                sendFileResult.phase1ExistingFileSizes,
-                sendFileResult.totalSize,
-                sendFileResult.existingTotalSize,
-                sendFileResult.took.millis(),
-                phase1ThrottlingWaitTime,
-                prepareEngineStep.result().millis(),
-                sendSnapshotResult.sentOperations,
-                sendSnapshotResult.tookTime.millis()
-            );
-            try {
-                future.onResponse(response);
-            } finally {
-                IOUtils.close(resources);
-            }
-        }, onFailure);
-    } catch (Exception e) {
-        IOUtils.closeWhileHandlingException(releaseResources, () -> future.onFailure(e));
-    }
+        });
    }
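onSendFileStepComplete pulls one idiom into the base class: whatever happens to the send-file step, the retained safe commit and the extra store reference must be released, and a failed release on the error path may only be logged. A self-contained sketch of that idiom, using CompletableFuture as a stand-in for the StepListener type in the patch:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;

    final class ReleaseOnCompletion {
        // Release the held resources on success and on failure alike.
        static <T> void releaseWhenDone(CompletableFuture<T> step, Closeable safeCommit, Closeable storeRef) {
            step.whenComplete((result, error) -> {
                try {
                    safeCommit.close(); // let the engine delete the retained Lucene commit
                    storeRef.close();   // drop the extra store reference
                } catch (IOException e) {
                    // mirror the real handler: log-and-continue, a failed close must not fail recovery
                }
            });
        }
    }
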
-    private boolean isTargetSameHistory() {
+    protected boolean isTargetSameHistory() {
        final String targetHistoryUUID = request.metadataSnapshot().getHistoryUUID();
        assert targetHistoryUUID != null : "incoming target history missing";
        return targetHistoryUUID.equals(shard.getHistoryUUID());
@@ -435,10 +250,11 @@ private boolean isTargetSameHistory() {
    /**
     * Counts the number of history operations from the starting sequence number
-     * @param startingSeqNo the starting sequence number to count; included
-     * @return number of history operations
+     *
+     * @param startingSeqNo the starting sequence number to count; included
+     * @return number of history operations
     */
-    private int countNumberOfHistoryOperations(long startingSeqNo) throws IOException {
+    protected int countNumberOfHistoryOperations(long startingSeqNo) throws IOException {
        return shard.countNumberOfHistoryOperations(PEER_RECOVERY_NAME, startingSeqNo, Long.MAX_VALUE);
    }
@@ -446,7 +262,7 @@ private int countNumberOfHistoryOperations(long startingSeqNo) throws IOException {
     * Increases the store reference and returns a {@link Releasable} that will decrease the store reference using the generic thread pool.
     * We must never release the store using an interruptible thread as we can risk invalidating the node lock.
     */
-    private Releasable acquireStore(Store store) {
+    protected Releasable acquireStore(Store store) {
        store.incRef();
        return Releasables.releaseOnce(() -> runWithGenericThreadPool(store::decRef));
    }
@@ -456,7 +272,7 @@ private Releasable acquireStore(Store store) {
     * with the file systems due to interrupt (see {@link org.apache.lucene.store.NIOFSDirectory} javadocs for more detail).
     * This method acquires a safe commit and wraps it to make sure that it will be released using the generic thread pool.
     */
-    private GatedCloseable<IndexCommit> acquireSafeCommit(IndexShard shard) {
+    protected GatedCloseable<IndexCommit> acquireSafeCommit(IndexShard shard) {
        final GatedCloseable<IndexCommit> wrappedSafeCommit = shard.acquireSafeIndexCommit();
        final AtomicBoolean closed = new AtomicBoolean(false);
        return new GatedCloseable<>(wrappedSafeCommit.get(), () -> {
@@ -530,7 +346,13 @@ static final class SendFileResult {
     * segments that are missing. Only segments that have the same size and
     * checksum can be reused
     */
-    void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, ActionListener<SendFileResult> listener) {
+    void phase1(
+        IndexCommit snapshot,
+        long startingSeqNo,
+        IntSupplier translogOps,
+        ActionListener<SendFileResult> listener,
+        boolean skipCreateRetentionLeaseStep
+    ) {
        cancellableThreads.checkForCancel();
        final Store store = shard.store();
        try {
@@ -628,7 +450,12 @@ void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, ActionListener<SendFileResult> listener) {
            listener::onFailure
        );
-        sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure);
+        // When doing peer recovery of remote store enabled replica, retention leases are not required.
+        if (skipCreateRetentionLeaseStep) {
+            sendFilesStep.whenComplete(r -> createRetentionLeaseStep.onResponse(null), listener::onFailure);
+        } else {
+            sendFilesStep.whenComplete(r -> createRetentionLease(startingSeqNo, createRetentionLeaseStep), listener::onFailure);
+        }
        createRetentionLeaseStep.whenComplete(retentionLease -> {
            final long lastKnownGlobalCheckpoint = shard.getLastKnownGlobalCheckpoint();
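The new skipCreateRetentionLeaseStep flag illustrates a useful pattern for step pipelines: rather than branching the whole chain, the optional step is short-circuited by completing its listener with null, so everything downstream stays wired identically. A stand-alone sketch of that shape, again with CompletableFuture standing in for StepListener and with illustrative names:

    import java.util.concurrent.CompletableFuture;

    final class ConditionalStep {
        static CompletableFuture<String> leaseStep(boolean skip, CompletableFuture<Void> sendFiles) {
            CompletableFuture<String> lease = new CompletableFuture<>();
            if (skip) {
                // remote-store replica path: complete immediately, no lease is created
                sendFiles.thenRun(() -> lease.complete(null));
            } else {
                // local-store path: a real lease would be created here
                sendFiles.thenRun(() -> lease.complete("peer-recovery-lease"));
            }
            return lease; // downstream wiring is identical in both cases
        }
    }
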
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java
new file mode 100644
index 0000000000000..ea13ca18bbfca
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandlerFactory.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.recovery;
+
+import org.opensearch.index.shard.IndexShard;
+
+/**
+ * Factory that supplies {@link RecoverySourceHandler}.
+ *
+ * @opensearch.internal
+ */
+public class RecoverySourceHandlerFactory {
+
+    public static RecoverySourceHandler create(
+        IndexShard shard,
+        RecoveryTargetHandler recoveryTarget,
+        StartRecoveryRequest request,
+        RecoverySettings recoverySettings
+    ) {
+        boolean isReplicaRecoveryWithRemoteTranslog = request.isPrimaryRelocation() == false && shard.isRemoteTranslogEnabled();
+        if (isReplicaRecoveryWithRemoteTranslog) {
+            return new RemoteStorePeerRecoverySourceHandler(
+                shard,
+                recoveryTarget,
+                shard.getThreadPool(),
+                request,
+                Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
+                recoverySettings.getMaxConcurrentFileChunks(),
+                recoverySettings.getMaxConcurrentOperations()
+            );
+        } else {
+            return new LocalStorePeerRecoverySourceHandler(
+                shard,
+                recoveryTarget,
+                shard.getThreadPool(),
+                request,
+                Math.toIntExact(recoverySettings.getChunkSize().getBytes()),
+                recoverySettings.getMaxConcurrentFileChunks(),
+                recoverySettings.getMaxConcurrentOperations()
+            );
+        }
+    }
+}
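The factory keeps the choice of handler in one place. A runnable sketch of its selection rule follows; the main() method is an illustrative toy, not part of the patch, and mirrors the boolean used in create (a relocating primary always takes the local-store path even when the index uses a remote translog):

    final class HandlerChoice {
        static String choose(boolean isPrimaryRelocation, boolean remoteTranslogEnabled) {
            boolean remote = isPrimaryRelocation == false && remoteTranslogEnabled;
            return remote ? "RemoteStorePeerRecoverySourceHandler" : "LocalStorePeerRecoverySourceHandler";
        }

        public static void main(String[] args) {
            System.out.println(choose(false, true));  // RemoteStorePeerRecoverySourceHandler
            System.out.println(choose(true, true));   // LocalStorePeerRecoverySourceHandler
            System.out.println(choose(false, false)); // LocalStorePeerRecoverySourceHandler
        }
    }
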
diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java
new file mode 100644
index 0000000000000..ff218ef71e397
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandler.java
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.recovery;
+
+import org.apache.lucene.index.IndexCommit;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.StepListener;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.engine.RecoveryEngineException;
+import org.opensearch.index.seqno.SequenceNumbers;
+import org.opensearch.index.shard.IndexShard;
+import org.opensearch.indices.RunUnderPrimaryPermit;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.Transports;
+
+import java.io.IOException;
+import java.util.function.Consumer;
+
+/**
+ * This handler is used when the peer recovery target is a remote store enabled replica.
+ *
+ * @opensearch.internal
+ */
+public class RemoteStorePeerRecoverySourceHandler extends RecoverySourceHandler {
+
+    public RemoteStorePeerRecoverySourceHandler(
+        IndexShard shard,
+        RecoveryTargetHandler recoveryTarget,
+        ThreadPool threadPool,
+        StartRecoveryRequest request,
+        int fileChunkSizeInBytes,
+        int maxConcurrentFileChunks,
+        int maxConcurrentOperations
+    ) {
+        super(shard, recoveryTarget, threadPool, request, fileChunkSizeInBytes, maxConcurrentFileChunks, maxConcurrentOperations);
+    }
+
+    @Override
+    protected void innerRecoveryToTarget(ActionListener<RecoveryResponse> listener, Consumer<Exception> onFailure) throws IOException {
+        // A replica of an index with a remote translog does not need the translog locally and keeps receiving the
+        // updated segments file on refreshes, flushes, and merges. Here, only file-based recovery is performed
+        // and no translog replay is done.
+
+        final StepListener<SendFileResult> sendFileStep = new StepListener<>();
+        final StepListener<TimeValue> prepareEngineStep = new StepListener<>();
+        final StepListener<SendSnapshotResult> sendSnapshotStep = new StepListener<>();
+
+        // Recovery is always file-based when recovering a replica (not a relocating primary) of an index whose
+        // segments and translog are backed by the remote store.
+
+        final GatedCloseable<IndexCommit> wrappedSafeCommit;
+        try {
+            wrappedSafeCommit = acquireSafeCommit(shard);
+            resources.add(wrappedSafeCommit);
+        } catch (final Exception e) {
+            throw new RecoveryEngineException(shard.shardId(), 1, "snapshot failed", e);
+        }
+
+        final long startingSeqNo = Long.parseLong(wrappedSafeCommit.get().getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)) + 1L;
+        logger.trace("performing file-based recovery followed by history replay starting at [{}]", startingSeqNo);
+
+        try {
+            final Releasable releaseStore = acquireStore(shard.store());
+            resources.add(releaseStore);
+            onSendFileStepComplete(sendFileStep, wrappedSafeCommit, releaseStore);
+
+            assert Transports.assertNotTransportThread(this + "[phase1]");
+            phase1(wrappedSafeCommit.get(), startingSeqNo, () -> 0, sendFileStep, true);
+        } catch (final Exception e) {
+            throw new RecoveryEngineException(shard.shardId(), 1, "sendFileStep failed", e);
+        }
+        assert startingSeqNo >= 0 : "startingSeqNo must be non negative. got: " + startingSeqNo;
+
+        sendFileStep.whenComplete(r -> {
+            assert Transports.assertNotTransportThread(this + "[prepareTargetForTranslog]");
+            // For a sequence based recovery, the target can keep its local translog
+            prepareTargetForTranslog(0, prepareEngineStep);
+        }, onFailure);
+
+        prepareEngineStep.whenComplete(prepareEngineTime -> {
+            assert Transports.assertNotTransportThread(this + "[phase2]");
+            RunUnderPrimaryPermit.run(
+                () -> shard.initiateTracking(request.targetAllocationId()),
+                shardId + " initiating tracking of " + request.targetAllocationId(),
+                shard,
+                cancellableThreads,
+                logger
+            );
+            final long endingSeqNo = shard.seqNoStats().getMaxSeqNo();
+            sendSnapshotStep.onResponse(new SendSnapshotResult(endingSeqNo, 0, TimeValue.ZERO));
+        }, onFailure);
+
+        finalizeStepAndCompleteFuture(startingSeqNo, sendSnapshotStep, sendFileStep, prepareEngineStep, onFailure);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java
index 37cfee62bee27..a196a449fa10a 100644
--- a/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java
+++ b/server/src/main/java/org/opensearch/monitor/fs/FsProbe.java
@@ -128,7 +128,7 @@ final FsInfo.IoStats ioStats(final Set<Tuple<Integer, Integer>> devicesNumbers,
            }
        }
-        return new FsInfo.IoStats(devicesStats.toArray(new FsInfo.DeviceStats[devicesStats.size()]));
+        return new FsInfo.IoStats(devicesStats.toArray(new FsInfo.DeviceStats[0]));
    } catch (Exception e) {
        // do not fail Elasticsearch if something unexpected
        // happens here
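From here on, most hunks apply one mechanical cleanup: collection.toArray(new T[collection.size()]) becomes collection.toArray(new T[0]). The empty-array form is shorter, sidesteps a size()/toArray race on concurrent collections, and on current JVMs is generally as fast as, or faster than, the pre-sized call. Both forms produce the same result:

    import java.util.List;

    final class ToArrayDemo {
        public static void main(String[] args) {
            List<String> values = List.of("a", "b", "c");
            String[] presized = values.toArray(new String[values.size()]); // old style
            String[] viaEmpty = values.toArray(new String[0]);             // style used by this patch
            System.out.println(presized.length + " == " + viaEmpty.length);
        }
    }
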
diff --git a/server/src/main/java/org/opensearch/monitor/jvm/DeadlockAnalyzer.java b/server/src/main/java/org/opensearch/monitor/jvm/DeadlockAnalyzer.java
index 12d749a2b9102..e48e036c1540b 100644
--- a/server/src/main/java/org/opensearch/monitor/jvm/DeadlockAnalyzer.java
+++ b/server/src/main/java/org/opensearch/monitor/jvm/DeadlockAnalyzer.java
@@ -81,7 +81,7 @@ private Deadlock[] createDeadlockDescriptions(Set<LinkedHashSet<ThreadInfo>> cycles) {
        Deadlock result[] = new Deadlock[cycles.size()];
        int count = 0;
        for (LinkedHashSet<ThreadInfo> cycle : cycles) {
-            ThreadInfo asArray[] = cycle.toArray(new ThreadInfo[cycle.size()]);
+            ThreadInfo asArray[] = cycle.toArray(new ThreadInfo[0]);
            Deadlock d = new Deadlock(asArray);
            result[count++] = d;
        }
diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
index 426551ab50f18..c0f7720341557 100644
--- a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
+++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java
@@ -80,7 +80,7 @@ public class JvmInfo implements ReportingService.Info {
        } catch (Exception t) {
            // ignore
        }
-        String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[runtimeMXBean.getInputArguments().size()]);
+        String[] inputArguments = runtimeMXBean.getInputArguments().toArray(new String[0]);
        Mem mem = new Mem(heapInit, heapMax, nonHeapInit, nonHeapMax, directMemoryMax);
        String bootClassPath;
diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java
index e2b14629e75f4..85cfd09ea6695 100644
--- a/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/document/RestTermVectorsAction.java
@@ -123,7 +123,7 @@ public static void addFieldStringsFromParameter(TermVectorsRequest termVectorsRequest
            }
        }
        if (selectedFields != null) {
-            termVectorsRequest.selectedFields(selectedFields.toArray(new String[selectedFields.size()]));
+            termVectorsRequest.selectedFields(selectedFields.toArray(new String[0]));
        }
    }
diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java
index 0e1febe9d2a61..193a50c718492 100644
--- a/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/search/RestGetAllPitsAction.java
@@ -54,7 +54,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client)
        for (DiscoveryNode node : nodesInCluster.get()) {
            nodes.add(node);
        }
-        DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[nodes.size()]);
+        DiscoveryNode[] disNodesArr = nodes.toArray(new DiscoveryNode[0]);
        GetAllPitNodesRequest getAllPitNodesRequest = new GetAllPitNodesRequest(disNodesArr);
        return channel -> client.getAllPits(getAllPitNodesRequest, new RestBuilderListener<GetAllPitNodesResponse>(channel) {
            @Override
diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketCollector.java b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketCollector.java
index 8f7222729efdb..cac3a6151bd78 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketCollector.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketCollector.java
@@ -188,7 +188,7 @@ private static class MultiLeafBucketCollector extends LeafBucketCollector {
        private int numCollectors;

        private MultiLeafBucketCollector(List<LeafBucketCollector> collectors, boolean cacheScores) {
-            this.collectors = collectors.toArray(new LeafBucketCollector[collectors.size()]);
+            this.collectors = collectors.toArray(new LeafBucketCollector[0]);
            this.cacheScores = cacheScores;
            this.numCollectors = this.collectors.length;
        }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java
deleted file mode 100644
index 
a87de32d0e18b..0000000000000 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.search.aggregations.bucket.histogram; - -import java.util.concurrent.TimeUnit; - -/** - * An aggregator capable of reporting bucket sizes in milliseconds. Used by RateAggregator for calendar-based buckets. - * - * @opensearch.internal - */ -public interface SizedBucketAggregatorBuilder { - double calendarDivider(TimeUnit timeUnit); -} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index 7be04b13f0d6b..dbb3de5add84b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -216,7 +216,7 @@ public SignificantTextAggregationBuilder fieldName(String fieldName) { * to also be the name of the JSON field holding the value */ public SignificantTextAggregationBuilder sourceFieldNames(List names) { - this.sourceFieldNames = names.toArray(new String[names.size()]); + this.sourceFieldNames = names.toArray(new String[0]); return this; } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java index 1e33b78d3f6c5..d44454112e7eb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/HyperLogLogPlusPlus.java @@ -236,10 +236,9 @@ private static class HyperLogLog extends AbstractHyperLogLog implements Releasab // array for holding the runlens. 
private ByteArray runLens; - HyperLogLog(BigArrays bigArrays, long initialBucketCount, int precision) { super(precision); - this.runLens = bigArrays.newByteArray(initialBucketCount << precision); + this.runLens = bigArrays.newByteArray(initialBucketCount << precision); this.bigArrays = bigArrays; this.iterator = new HyperLogLogIterator(this, precision, m); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsParser.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsParser.java index d1de39600ad73..312bc88e480aa 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsParser.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsParser.java @@ -85,7 +85,7 @@ public final BucketMetricsPipelineAggregationBuilder parse(String pipelineAgg String path = parser.text(); paths.add(path); } - bucketsPaths = paths.toArray(new String[paths.size()]); + bucketsPaths = paths.toArray(new String[0]); } else { parseToken(pipelineAggregatorName, parser, currentFieldName, token, params); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index 1455c2d6d8780..96f770647db8b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -223,7 +223,7 @@ public static DerivativePipelineAggregationBuilder parse(String pipelineAggregat String path = parser.text(); paths.add(path); } - bucketsPaths = paths.toArray(new String[paths.size()]); + bucketsPaths = paths.toArray(new String[0]); } else { throw new ParsingException( parser.getTokenLocation(), diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java index 7e63af31a9c86..1137610026989 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java @@ -78,7 +78,7 @@ protected void collectBucketValue(String bucketKey, Double bucketValue) { @Override protected InternalAggregation buildAggregation(Map metadata) { - String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]); + String[] keys = maxBucketKeys.toArray(new String[0]); return new InternalBucketMetricValue(name(), keys, maxValue, format, metadata()); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index 5f94c21da395e..e5d3d418cdbbc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -386,7 +386,7 @@ public static MovAvgPipelineAggregationBuilder parse( String path = parser.text(); paths.add(path); } - bucketsPaths = paths.toArray(new String[paths.size()]); + bucketsPaths = paths.toArray(new String[0]); } else { throw new ParsingException( parser.getTokenLocation(), 
diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java index e3140ed77d3d5..71d205c9c2228 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java @@ -220,7 +220,7 @@ public static SerialDiffPipelineAggregationBuilder parse(String reducerName, XCo String path = parser.text(); paths.add(path); } - bucketsPaths = paths.toArray(new String[paths.size()]); + bucketsPaths = paths.toArray(new String[0]); } else { throw new ParsingException( parser.getTokenLocation(), diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java index fe972028508e8..b93232d0c65f7 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/FetchSourceContext.java @@ -150,7 +150,7 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { list.add(parser.text()); } - includes = list.toArray(new String[list.size()]); + includes = list.toArray(new String[0]); } else if (token == XContentParser.Token.START_OBJECT) { String currentFieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -170,7 +170,7 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx ); } } - includes = includesList.toArray(new String[includesList.size()]); + includes = includesList.toArray(new String[0]); } else if (EXCLUDES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { List excludesList = new ArrayList<>(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { @@ -184,7 +184,7 @@ public static FetchSourceContext fromXContent(XContentParser parser) throws IOEx ); } } - excludes = excludesList.toArray(new String[excludesList.size()]); + excludes = excludesList.toArray(new String[0]); } else { throw new ParsingException( parser.getTokenLocation(), diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java index 638e1619a3658..097acc2d73668 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightField.java @@ -137,7 +137,7 @@ public static HighlightField fromXContent(XContentParser parser) throws IOExcept while (parser.nextToken() != XContentParser.Token.END_ARRAY) { values.add(new Text(parser.text())); } - fragments = values.toArray(new Text[values.size()]); + fragments = values.toArray(new Text[0]); } else if (token == XContentParser.Token.VALUE_NULL) { fragments = null; } else { diff --git a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java index a1fe3a02fb113..2068bfc38af56 100644 --- a/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java +++ 
b/server/src/main/java/org/opensearch/search/sort/GeoDistanceSortBuilder.java @@ -239,7 +239,7 @@ public GeoDistanceSortBuilder points(GeoPoint... points) { * Returns the points to create the range distance facets from. */ public GeoPoint[] points() { - return this.points.toArray(new GeoPoint[this.points.size()]); + return this.points.toArray(new GeoPoint[0]); } /** @@ -574,7 +574,7 @@ public static GeoDistanceSortBuilder fromXContent(XContentParser parser, String } } - GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[geoPoints.size()])); + GeoDistanceSortBuilder result = new GeoDistanceSortBuilder(fieldName, geoPoints.toArray(new GeoPoint[0])); result.geoDistance(geoDistance); result.unit(unit); result.order(order); @@ -642,7 +642,7 @@ public BucketedSort buildBucketedSort(QueryShardContext context, int bucketSize, private GeoPoint[] localPoints() { // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed // on 2.x created indexes - GeoPoint[] localPoints = points.toArray(new GeoPoint[points.size()]); + GeoPoint[] localPoints = points.toArray(new GeoPoint[0]); if (GeoValidationMethod.isIgnoreMalformed(validation) == false) { for (GeoPoint point : localPoints) { if (GeoUtils.isValidLatitude(point.lat()) == false) { diff --git a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java index a8ade60b5b64f..e59d8e9d33efa 100644 --- a/server/src/main/java/org/opensearch/search/sort/SortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/SortBuilder.java @@ -182,10 +182,7 @@ public static Optional buildSort(List> sortBuilde } if (sort) { return Optional.of( - new SortAndFormats( - new Sort(sortFields.toArray(new SortField[sortFields.size()])), - sortFormats.toArray(new DocValueFormat[sortFormats.size()]) - ) + new SortAndFormats(new Sort(sortFields.toArray(new SortField[0])), sortFormats.toArray(new DocValueFormat[0])) ); } } diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java index cbf1d332dfa1b..e9131015996d9 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/DirectCandidateGenerator.java @@ -324,7 +324,7 @@ public void addCandidates(List candidates) { final Set set = new HashSet<>(candidates); Collections.addAll(set, this.candidates); - this.candidates = set.toArray(new Candidate[set.size()]); + this.candidates = set.toArray(new Candidate[0]); // Sort strongest to weakest: Arrays.sort(this.candidates, Collections.reverseOrder()); } diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/NoisyChannelSpellChecker.java b/server/src/main/java/org/opensearch/search/suggest/phrase/NoisyChannelSpellChecker.java index 49790c996371a..e8ba90f353f02 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/NoisyChannelSpellChecker.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/NoisyChannelSpellChecker.java @@ -129,7 +129,7 @@ public void end() { } double cutoffScore = Double.MIN_VALUE; CandidateScorer scorer = new CandidateScorer(wordScorer, numCorrections, gramSize); - CandidateSet[] candidateSets = candidateSetsList.toArray(new CandidateSet[candidateSetsList.size()]); + CandidateSet[] candidateSets = 
candidateSetsList.toArray(new CandidateSet[0]); if (confidence > 0.0) { Candidate[] candidates = new Candidate[candidateSets.length]; for (int i = 0; i < candidates.length; i++) { diff --git a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java index ef0ab4917de2c..fa43479bfd89e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java +++ b/server/src/main/java/org/opensearch/search/suggest/phrase/PhraseSuggester.java @@ -133,7 +133,7 @@ public Suggestion> innerExecute( try (TokenStream stream = tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) { checkerResult = checker.getCorrections( stream, - new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[gens.size()])), + new MultiCandidateGeneratorWrapper(suggestion.getShardSize(), gens.toArray(new CandidateGenerator[0])), suggestion.maxErrors(), suggestion.getShardSize(), wordScorer, diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 94ab875091caf..cdad4384cf54e 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -586,11 +586,7 @@ public RestStatus status() { if (shardFailures.size() == 0) { return RestStatus.OK; } - return RestStatus.status( - successfulShards, - totalShards, - shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]) - ); + return RestStatus.status(successfulShards, totalShards, shardFailures.toArray(new ShardOperationFailedException[0])); } @Override diff --git a/server/src/main/java/org/opensearch/transport/FutureTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/FutureTransportResponseHandler.java deleted file mode 100644 index 71651274ede40..0000000000000 --- a/server/src/main/java/org/opensearch/transport/FutureTransportResponseHandler.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.transport; - -import org.opensearch.threadpool.ThreadPool; - -/** - * A response handler to be used when all interaction will be done through the {@link TransportFuture}. 
- * - * @opensearch.internal - */ -public abstract class FutureTransportResponseHandler implements TransportResponseHandler { - - @Override - public void handleResponse(T response) {} - - @Override - public void handleException(TransportException exp) {} - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } -} diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index ee88e34a8a93b..b9bf035a7fa77 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -41,7 +41,6 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -72,7 +71,6 @@ import java.net.UnknownHostException; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -793,18 +791,6 @@ public Transport.Connection getConnection(DiscoveryNode node) { } } - public Map getChannelVersion(DiscoveryNodes nodes) { - Map nodeChannelVersions = new HashMap<>(nodes.getSize()); - for (DiscoveryNode node : nodes) { - try { - nodeChannelVersions.putIfAbsent(node.getId(), connectionManager.getConnection(node).getVersion()); - } catch (Exception e) { - // ignore in case node is not connected - } - } - return nodeChannelVersions; - } - public final void sendChildRequest( final DiscoveryNode node, final String action, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 97a045872477d..a981c527b6da6 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -685,7 +685,7 @@ protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) { filteredNodes.add(node); } } - return filteredNodes.toArray(new String[filteredNodes.size()]); + return filteredNodes.toArray(new String[0]); } @Override diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index 3c2e34ebb9ed5..6c54303802407 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -88,7 +88,7 @@ public void testSerialization() throws Exception { } ClusterSearchShardsResponse clusterSearchShardsResponse = new ClusterSearchShardsResponse( clusterSearchShardsGroups, - nodes.toArray(new DiscoveryNode[nodes.size()]), + nodes.toArray(new DiscoveryNode[0]), indicesAndFilters ); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java index b206c2e19a65b..4596ee4ef77af 100644 
--- a/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -425,7 +425,7 @@ public static IndicesStatsResponse randomIndicesStatsResponse(final IndexMetadat } } return IndicesStatsTests.newIndicesStatsResponse( - shardStats.toArray(new ShardStats[shardStats.size()]), + shardStats.toArray(new ShardStats[0]), shardStats.size(), shardStats.size(), 0, diff --git a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java index 802257357b7a8..18b6cc37c37c4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/stats/IndicesStatsResponseTests.java @@ -100,13 +100,7 @@ public void testGetIndices() { shardsCounter.incrementAndGet(); } } - final IndicesStatsResponse indicesStatsResponse = new IndicesStatsResponse( - shards.toArray(new ShardStats[shards.size()]), - 0, - 0, - 0, - null - ); + final IndicesStatsResponse indicesStatsResponse = new IndicesStatsResponse(shards.toArray(new ShardStats[0]), 0, 0, 0, null); Map indexStats = indicesStatsResponse.getIndices(); assertThat(indexStats.size(), is(noOfIndexes)); diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java index e7e1166eb57fa..acc612183eac7 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestModifierTests.java @@ -130,7 +130,7 @@ public void onFailure(Exception e) {} IndexResponse indexResponse = new IndexResponse(new ShardId("index", "_na_", 0), indexRequest.id(), 1, 17, 1, true); originalResponses.add(new BulkItemResponse(Integer.parseInt(indexRequest.id()), indexRequest.opType(), indexResponse)); } - bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[originalResponses.size()]), 0)); + bulkResponseListener.onResponse(new BulkResponse(originalResponses.toArray(new BulkItemResponse[0]), 0)); assertThat(responses.size(), Matchers.equalTo(32)); for (int i = 0; i < 32; i++) { diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 76142efc60b7d..0827e4fc20255 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -105,7 +105,7 @@ public void testNodesSelectors() { String nodeId = randomFrom(nodeIds); nodeSelectors.add(nodeId); } - String[] finalNodesIds = nodeSelectors.toArray(new String[nodeSelectors.size()]); + String[] finalNodesIds = nodeSelectors.toArray(new String[0]); TestNodesRequest request = new TestNodesRequest(finalNodesIds); action.new AsyncAction(null, request, new PlainActionFuture<>()).start(); Map> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear(); diff --git a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java index d9613176c0156..f586a3db1c05c 
100644 --- a/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/opensearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -308,7 +308,7 @@ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs refresh(); - return configs.toArray(new TestConfig[configs.size()]); + return configs.toArray(new TestConfig[0]); } protected TestFieldSetting[] getFieldSettings() { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index b5c5d30c45c47..d7253e6f57b38 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -175,7 +175,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -270,7 +270,7 @@ public void testJoinFailedForDecommissionedNode() throws Exception { when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index 67c4f6888c882..dba0afc9f3641 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -168,9 +168,9 @@ public void testResolveNodesIds() { expectedNodeIdsSet.add(discoveryNode.getId()); } - String[] resolvedNodesIds = discoveryNodes.resolveNodes(nodeSelectors.toArray(new String[nodeSelectors.size()])); + String[] resolvedNodesIds = discoveryNodes.resolveNodes(nodeSelectors.toArray(new String[0])); Arrays.sort(resolvedNodesIds); - String[] expectedNodesIds = expectedNodeIdsSet.toArray(new String[expectedNodeIdsSet.size()]); + String[] expectedNodesIds = expectedNodeIdsSet.toArray(new String[0]); Arrays.sort(expectedNodesIds); assertThat(resolvedNodesIds, equalTo(expectedNodesIds)); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index b3d62ea9c6160..121694e880b10 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -238,7 +238,7 @@ public ClusterState randomInitialClusterState() { for (int i = 0; i < randomIntBetween(2, 5); i++) { allNodes.add(createNode()); } - ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); return state; } diff --git a/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java b/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java index 474f5e3ea09dd..9c1e3255bb3d8 100644 --- a/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java +++ b/server/src/test/java/org/opensearch/cluster/serialization/DiffableTests.java @@ -216,10 +216,7 @@ public abstract class MapDriver { protected final Set keysToRemove = new HashSet<>(randomSubsetOf(randomInt(keys.size()), keys.toArray(new Integer[0]))); protected final Set keysThatAreNotRemoved = Sets.difference(keys, keysToRemove); protected final Set keysToOverride = new HashSet<>( - randomSubsetOf( - randomInt(keysThatAreNotRemoved.size()), - keysThatAreNotRemoved.toArray(new Integer[keysThatAreNotRemoved.size()]) - ) + randomSubsetOf(randomInt(keysThatAreNotRemoved.size()), keysThatAreNotRemoved.toArray(new Integer[0])) ); // make sure keysToAdd does not contain elements in keys protected final Set keysToAdd = Sets.difference(randomPositiveIntSet(), keys); diff --git a/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java b/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java index 155391aaaf62f..6ad272542dbb1 100644 --- a/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java +++ b/server/src/test/java/org/opensearch/common/collect/IteratorsTests.java @@ -104,7 +104,7 @@ public void testRandomIterators() { } iterators[i] = theseValues.iterator(); } - assertContainsInOrder(Iterators.concat(iterators), values.toArray(new Integer[values.size()])); + assertContainsInOrder(Iterators.concat(iterators), values.toArray(new Integer[0])); } public void testTwoEntries() { diff --git a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java index 2ca692a8db374..9f47c5ae96394 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoJsonShapeParserTests.java @@ -352,7 +352,7 @@ public void testParsePolygon() throws IOException, ParseException { shellCoordinates.add(new Coordinate(101, 1)); shellCoordinates.add(new Coordinate(100, 1)); shellCoordinates.add(new Coordinate(100, 0)); - Coordinate[] coordinates = shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]); + Coordinate[] coordinates = shellCoordinates.toArray(new Coordinate[0]); LinearRing shell = GEOMETRY_FACTORY.createLinearRing(coordinates); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); assertGeometryEquals(jtsGeom(expected), polygonGeoJson, true); @@ -404,7 +404,7 @@ public void testParse3DPolygon() throws IOException, ParseException { shellCoordinates.add(new Coordinate(101, 1, 10)); shellCoordinates.add(new Coordinate(100, 1, 10)); shellCoordinates.add(new Coordinate(100, 0, 10)); - Coordinate[] coordinates = shellCoordinates.toArray(new Coordinate[shellCoordinates.size()]); + 
Coordinate[] coordinates = shellCoordinates.toArray(new Coordinate[0]); Version randomVersion = VersionUtils.randomIndexCompatibleVersion(random()); Settings indexSettings = Settings.builder() @@ -414,7 +414,7 @@ public void testParse3DPolygon() throws IOException, ParseException { .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .build(); - LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, null); Mapper.BuilderContext mockBuilderContext = new Mapper.BuilderContext(indexSettings, new ContentPath()); final LegacyGeoShapeFieldMapper mapperBuilder = (LegacyGeoShapeFieldMapper) (new LegacyGeoShapeFieldMapper.Builder("test") @@ -1389,9 +1389,9 @@ public void testParsePolygonWithHole() throws IOException, ParseException { holeCoordinates.add(new Coordinate(100.2, 0.8)); holeCoordinates.add(new Coordinate(100.2, 0.2)); - LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); LinearRing[] holes = new LinearRing[1]; - holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()])); + holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[0])); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertGeometryEquals(jtsGeom(expected), polygonGeoJson, true); @@ -1574,9 +1574,9 @@ public void testParseMultiPolygon() throws IOException, ParseException { holeCoordinates.add(new Coordinate(100.2, 0.8)); holeCoordinates.add(new Coordinate(100.2, 0.2)); - LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); LinearRing[] holes = new LinearRing[1]; - holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()])); + holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[0])); Polygon withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes); shellCoordinates = new ArrayList<>(); @@ -1586,7 +1586,7 @@ public void testParseMultiPolygon() throws IOException, ParseException { shellCoordinates.add(new Coordinate(102, 2)); shellCoordinates.add(new Coordinate(102, 3)); - shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); Polygon withoutHoles = GEOMETRY_FACTORY.createPolygon(shell, null); Shape expected = shapeCollection(withoutHoles, withHoles); @@ -1688,9 +1688,9 @@ public void testParseMultiPolygon() throws IOException, ParseException { holeCoordinates.add(new Coordinate(100.8, 0.8)); holeCoordinates.add(new Coordinate(100.2, 0.8)); - shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); holes = new LinearRing[1]; - holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()])); + holes[0] = 
GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[0])); withHoles = GEOMETRY_FACTORY.createPolygon(shell, holes); assertGeometryEquals(jtsGeom(withHoles), multiPolygonGeoJson, true); @@ -1789,8 +1789,8 @@ public void testParseGeometryCollection() throws IOException, ParseException { expected[0] = jtsGeom(expectedLineString); Point expectedPoint = GEOMETRY_FACTORY.createPoint(new Coordinate(102.0, 2.0)); expected[1] = new JtsPoint(expectedPoint, SPATIAL_CONTEXT); - LinearRing shell1 = GEOMETRY_FACTORY.createLinearRing(shellCoordinates1.toArray(new Coordinate[shellCoordinates1.size()])); - LinearRing shell2 = GEOMETRY_FACTORY.createLinearRing(shellCoordinates2.toArray(new Coordinate[shellCoordinates2.size()])); + LinearRing shell1 = GEOMETRY_FACTORY.createLinearRing(shellCoordinates1.toArray(new Coordinate[0])); + LinearRing shell2 = GEOMETRY_FACTORY.createLinearRing(shellCoordinates2.toArray(new Coordinate[0])); MultiPolygon expectedMultiPoly = GEOMETRY_FACTORY.createMultiPolygon( new Polygon[] { GEOMETRY_FACTORY.createPolygon(shell1), GEOMETRY_FACTORY.createPolygon(shell2) } ); diff --git a/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java index cd05331442be2..80164ef8a925c 100644 --- a/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/opensearch/common/geo/GeoWKTShapeParserTests.java @@ -180,7 +180,7 @@ private List<Coordinate> randomLineStringCoords() { @Override public void testParseLineString() throws IOException, ParseException { List<Coordinate> coordinates = randomLineStringCoords(); - LineString expected = GEOMETRY_FACTORY.createLineString(coordinates.toArray(new Coordinate[coordinates.size()])); + LineString expected = GEOMETRY_FACTORY.createLineString(coordinates.toArray(new Coordinate[0])); assertExpected(jtsGeom(expected), new LineStringBuilder(coordinates), true); double[] lats = new double[coordinates.size()]; @@ -199,7 +199,7 @@ public void testParseMultiLineString() throws IOException, ParseException { MultiLineStringBuilder builder = new MultiLineStringBuilder(); for (int j = 0; j < numLineStrings; ++j) { List<Coordinate> lsc = randomLineStringCoords(); - Coordinate[] coords = lsc.toArray(new Coordinate[lsc.size()]); + Coordinate[] coords = lsc.toArray(new Coordinate[0]); lineStrings.add(GEOMETRY_FACTORY.createLineString(coords)); builder.linestring(new LineStringBuilder(lsc)); } @@ -220,7 +220,7 @@ public void testParseMultiLineString() throws IOException, ParseException { assertExpected(expectedGeom, builder, false); assertMalformed(builder); - MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString(lineStrings.toArray(new LineString[lineStrings.size()])); + MultiLineString expected = GEOMETRY_FACTORY.createMultiLineString(lineStrings.toArray(new LineString[0])); assumeTrue("JTS test path cannot handle empty multilinestrings", numLineStrings > 1); assertExpected(jtsGeom(expected), builder, true); } @@ -278,9 +278,9 @@ public void testParsePolygonWithHole() throws IOException, ParseException { PolygonBuilder polygonWithHole = new PolygonBuilder(new CoordinatesBuilder().coordinates(shellCoordinates)); polygonWithHole.hole(new LineStringBuilder(holeCoordinates)); - LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[shellCoordinates.size()])); + LinearRing shell = GEOMETRY_FACTORY.createLinearRing(shellCoordinates.toArray(new Coordinate[0])); LinearRing[] holes = new LinearRing[1];
- holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[holeCoordinates.size()])); + holes[0] = GEOMETRY_FACTORY.createLinearRing(holeCoordinates.toArray(new Coordinate[0])); Polygon expected = GEOMETRY_FACTORY.createPolygon(shell, holes); assertExpected(jtsGeom(expected), polygonWithHole, true); diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index 0edcd55cc35c3..2b54455f589fd 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -92,62 +92,12 @@ import java.util.HashSet; import java.util.List; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; public class LuceneTests extends OpenSearchTestCase { private static final NamedWriteableRegistry EMPTY_REGISTRY = new NamedWriteableRegistry(Collections.emptyList()); - public void testWaitForIndex() throws Exception { - final MockDirectoryWrapper dir = newMockDirectory(); - - final AtomicBoolean succeeded = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - - // Create a shadow Engine, which will freak out because there is no - // index yet - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - latch.await(); - if (Lucene.waitForIndex(dir, 5000)) { - succeeded.set(true); - } else { - fail("index should have eventually existed!"); - } - } catch (InterruptedException e) { - // ignore interruptions - } catch (Exception e) { - fail("should have been able to create the engine! " + e.getMessage()); - } - } - }); - t.start(); - - // count down latch - // now shadow engine should try to be created - latch.countDown(); - - IndexWriterConfig iwc = newIndexWriterConfig(); - iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); - iwc.setMergePolicy(NoMergePolicy.INSTANCE); - iwc.setMaxBufferedDocs(2); - IndexWriter writer = new IndexWriter(dir, iwc); - Document doc = new Document(); - doc.add(new TextField("id", "1", random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - writer.addDocument(doc); - writer.commit(); - - t.join(); - - writer.close(); - dir.close(); - assertTrue("index should have eventually existed", succeeded.get()); - } - public void testCleanIndex() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(); diff --git a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java index 8d8ba9872ee61..bbdb255424030 100644 --- a/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java +++ b/server/src/test/java/org/opensearch/common/xcontent/support/XContentMapValuesTests.java @@ -73,13 +73,13 @@ protected void testFilter(Builder expected, Builder actual, Set includes if (includes == null) { sourceIncludes = randomBoolean() ? Strings.EMPTY_ARRAY : null; } else { - sourceIncludes = includes.toArray(new String[includes.size()]); + sourceIncludes = includes.toArray(new String[0]); } String[] sourceExcludes; if (excludes == null) { sourceExcludes = randomBoolean() ? 
Strings.EMPTY_ARRAY : null; } else { - sourceExcludes = excludes.toArray(new String[excludes.size()]); + sourceExcludes = excludes.toArray(new String[0]); } assertEquals( diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index d886922d56882..9c2ba140cdc09 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -266,7 +266,7 @@ public void testResolveIndexFolders() throws Exception { } for (Map.Entry<String, List<Path>> actualIndexDataPathEntry : actualIndexDataPaths.entrySet()) { List<Path> actual = actualIndexDataPathEntry.getValue(); - Path[] actualPaths = actual.toArray(new Path[actual.size()]); + Path[] actualPaths = actual.toArray(new Path[0]); assertThat(actualPaths, equalTo(env.resolveIndexFolder(actualIndexDataPathEntry.getKey()))); } assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty()); diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index 72ca8bff4087d..faab2f405010a 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -8,6 +8,8 @@ package org.opensearch.index; +import org.hamcrest.Matcher; +import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.cluster.service.ClusterService; @@ -23,6 +25,10 @@ import java.util.concurrent.atomic.AtomicInteger; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + public class ShardIndexingPressureConcurrentExecutionTests extends OpenSearchTestCase { private final Settings settings = Settings.builder() @@ -34,8 +40,8 @@ public class ShardIndexingPressureConcurrentExecutionTests extends OpenSearchTes .put(ShardIndexingPressureSettings.REQUEST_SIZE_WINDOW.getKey(), 100) .build(); - final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); + private final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + private final ClusterService clusterService = new ClusterService(settings, clusterSettings, null); public enum OperationType { COORDINATING, @@ -71,15 +77,11 @@ public void testCoordinatingPrimaryThreadedUpdateToShardLimits() throws Exceptio NUM_THREADS * 15, shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1).getCurrentCombinedCoordinatingAndPrimaryBytes() ); - assertTrue( + MatcherAssert.assertThat( (double) (NUM_THREADS * 15) / shardIndexingPressure.shardStats() .getIndexingPressureShardStats(shardId1) - .getCurrentPrimaryAndCoordinatingLimits() < 0.95 - ); - assertTrue( - (double) (NUM_THREADS * 15) / shardIndexingPressure.shardStats() - .getIndexingPressureShardStats(shardId1) - .getCurrentPrimaryAndCoordinatingLimits() > 0.75 + .getCurrentPrimaryAndCoordinatingLimits(), + isInOperatingFactorRange() ); for (int i = 0; i < NUM_THREADS; i++) { @@ -112,15 +114,11 @@ public void testReplicaThreadedUpdateToShardLimits() throws
Exception { Releasable[] releasable = fireConcurrentRequests(NUM_THREADS, shardIndexingPressure, shardId1, 15, OperationType.REPLICA); assertEquals(NUM_THREADS * 15, shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1).getCurrentReplicaBytes()); - assertTrue( - (double) (NUM_THREADS * 15) / shardIndexingPressure.shardStats() - .getIndexingPressureShardStats(shardId1) - .getCurrentReplicaLimits() < 0.95 - ); - assertTrue( + MatcherAssert.assertThat( (double) (NUM_THREADS * 15) / shardIndexingPressure.shardStats() .getIndexingPressureShardStats(shardId1) - .getCurrentReplicaLimits() > 0.75 + .getCurrentReplicaLimits(), + isInOperatingFactorRange() ); for (int i = 0; i < NUM_THREADS; i++) { @@ -1087,4 +1085,11 @@ private void fireConcurrentAndParallelRequestsForUniformThroughPut( t.join(); } } + + private Matcher<Double> isInOperatingFactorRange() { + return allOf( + greaterThan(ShardIndexingPressureMemoryManager.LOWER_OPERATING_FACTOR.get(settings)), + lessThanOrEqualTo(ShardIndexingPressureMemoryManager.UPPER_OPERATING_FACTOR.get(settings)) + ); + } } diff --git a/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java b/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java index 01281ea323e60..0446ac78d4efc 100644 --- a/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java +++ b/server/src/test/java/org/opensearch/index/analysis/AnalysisTests.java @@ -136,14 +136,24 @@ public void testParseWordListError() throws IOException { assertEquals("Line [1]: Error while parsing rule = abcd", ex.getMessage()); } - public void testParseWordListOutsideConfigDirError() { + public void testParseWordListOutsideConfigDirError() throws IOException { Path home = createTempDir(); - Path dict = home.resolve("/etc/os-release"); + Path temp = createTempDir(); + Path dict = temp.resolve("foo.dict"); + try (BufferedWriter writer = Files.newBufferedWriter(dict, StandardCharsets.UTF_8)) { + writer.write("abcd"); + writer.write('\n'); + } Settings nodeSettings = Settings.builder().put("foo.bar_path", dict).put(Environment.PATH_HOME_SETTING.getKey(), home).build(); Environment env = TestEnvironment.newEnvironment(nodeSettings); RuntimeException ex = expectThrows( RuntimeException.class, - () -> Analysis.parseWordList(env, nodeSettings, "foo.bar", s -> { throw new RuntimeException("Error while parsing"); }) + () -> Analysis.parseWordList( + env, + nodeSettings, + "foo.bar", + s -> { throw new RuntimeException("Error while parsing rule = " + s); } + ) ); assertEquals("Line [1]: Invalid rule", ex.getMessage()); } diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java index 060901a3eba38..1b4c95d9ceb8f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java @@ -164,12 +164,6 @@ public void testLatLonInOneValueArray() throws Exception { assertThat(doc.rootDoc().getFields("field"), arrayWithSize(4)); } - public void testLatLonInArrayMoreThanThreeValues() throws Exception { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", true))); - Exception e = expectThrows(MapperParsingException.class, () -> mapper.parse(source(b -> b.array("field", 1.2, 1.3, 1.4, 1.5)))); - assertThat(e.getCause().getMessage(), containsString("[geo_point] field type does not accept more than
3 values")); - } - public void testLonLatArray() throws Exception { DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); ParsedDocument doc = mapper.parse(source(b -> b.startArray("field").value(1.3).value(1.2).endArray())); diff --git a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java index 2061378c3f54f..9c670d6cc65ca 100644 --- a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -246,10 +246,7 @@ protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsReque if (request.doc() != null) { generatedFields = generateFields(randomFields, request.doc().utf8ToString()); } else { - generatedFields = generateFields( - request.selectedFields().toArray(new String[request.selectedFields().size()]), - request.id() - ); + generatedFields = generateFields(request.selectedFields().toArray(new String[0]), request.id()); } EnumSet flags = EnumSet.of(TermVectorsRequest.Flag.Positions, TermVectorsRequest.Flag.Offsets); response.setFields(generatedFields, request.selectedFields(), flags, generatedFields); diff --git a/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java index 31bb078fa4c1d..6366951329788 100644 --- a/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RegexpQueryBuilderTests.java @@ -57,7 +57,7 @@ protected RegexpQueryBuilder doCreateTestQueryBuilder() { for (int i = 0; i < iter; i++) { flags.add(randomFrom(RegexpFlag.values())); } - query.flags(flags.toArray(new RegexpFlag[flags.size()])); + query.flags(flags.toArray(new RegexpFlag[0])); } if (randomBoolean()) { query.caseInsensitive(true); diff --git a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java index 1101da8de70fe..fa8646d8628a0 100644 --- a/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/SimpleQueryStringBuilderTests.java @@ -108,7 +108,7 @@ protected SimpleQueryStringBuilder doCreateTestQueryBuilder() { flagSet.add(randomFrom(SimpleQueryStringFlag.values())); } if (flagSet.size() > 0) { - result.flags(flagSet.toArray(new SimpleQueryStringFlag[flagSet.size()])); + result.flags(flagSet.toArray(new SimpleQueryStringFlag[0])); } } diff --git a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java index e9a285208f1a6..4c8d48de114fa 100644 --- a/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/TermsQueryBuilderTests.java @@ -232,7 +232,7 @@ public GetResponse executeGet(GetRequest getRequest) { try { XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint(); builder.startObject(); - builder.array(termsPath, randomTerms.toArray(new Object[randomTerms.size()])); + builder.array(termsPath, randomTerms.toArray(new Object[0])); builder.endObject(); json = Strings.toString(builder); } catch (IOException ex) { diff --git 
a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java index 3a450e1f72a8d..7a79c58768575 100644 --- a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java @@ -235,7 +235,7 @@ public void testConcurrentReplica() throws InterruptedException { seqNoPerThread[t] = randomSubsetOf(size, seqNos).toArray(new Integer[size]); seqNos.removeAll(Arrays.asList(seqNoPerThread[t])); } - seqNoPerThread[threads.length - 1] = seqNos.toArray(new Integer[seqNos.size()]); + seqNoPerThread[threads.length - 1] = seqNos.toArray(new Integer[0]); logger.info("--> will run [{}] threads, maxOps [{}], unfinished seq no [{}]", threads.length, maxOps, unFinishedSeq); final CyclicBarrier barrier = new CyclicBarrier(threads.length); for (int t = 0; t < threads.length; t++) { diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 5d317693e02df..950ba047df19d 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; @@ -36,72 +37,6 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, "true") .build(); - public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { - - // Step1 - Start primary, index docs and flush - shards.startPrimary(); - final IndexShard primary = shards.getPrimary(); - int numDocs = shards.indexDocs(randomIntBetween(10, 100)); - shards.flush(); - - // Step 2 - Start replica for recovery to happen, check both has same number of docs - final IndexShard replica1 = shards.addReplica(); - shards.startAll(); - assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); - - // Step 3 - Index more docs, run segment replication, check both have same number of docs - int moreDocs = shards.indexDocs(randomIntBetween(10, 100)); - primary.refresh("test"); - replicateSegments(primary, shards.getReplicas()); - assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); - - // Step 4 - Check both shard has expected number of doc count - assertDocCount(primary, numDocs + moreDocs); - assertDocCount(replica1, numDocs + moreDocs); - - // Step 5 - Start new replica, recovery happens, and check that new replica has docs upto last flush - final IndexShard replica2 = shards.addReplica(); - shards.startAll(); - assertDocCount(replica2, numDocs); - - // Step 6 - Segment replication, check all shards have same number of docs - replicateSegments(primary, shards.getReplicas()); - shards.assertAllEqual(numDocs + moreDocs); - } - } - - public void testNoTranslogHistoryTransferred() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { - - // Step1 - Start primary, index docs, flush, index more docs, check 
translog in primary as expected - shards.startPrimary(); - final IndexShard primary = shards.getPrimary(); - int numDocs = shards.indexDocs(randomIntBetween(10, 100)); - shards.flush(); - List<DocIdSeqNoAndSource> docIdAndSeqNosAfterFlush = getDocIdAndSeqNos(primary); - int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); - assertEquals(moreDocs, getTranslog(primary).totalOperations()); - - // Step 2 - Start replica, recovery happens, check docs recovered till last flush - final IndexShard replica = shards.addReplica(); - shards.startAll(); - assertEquals(docIdAndSeqNosAfterFlush, getDocIdAndSeqNos(replica)); - assertDocCount(replica, numDocs); - assertEquals(NRTReplicationEngine.class, replica.getEngine().getClass()); - - // Step 3 - Check replica's translog has no operations - assertEquals(WriteOnlyTranslogManager.class, replica.getEngine().translogManager().getClass()); - WriteOnlyTranslogManager replicaTranslogManager = (WriteOnlyTranslogManager) replica.getEngine().translogManager(); - assertEquals(0, replicaTranslogManager.getTranslog().totalOperations()); - - // Adding this for close to succeed - shards.flush(); - replicateSegments(primary, shards.getReplicas()); - shards.assertAllEqual(numDocs + moreDocs); - } - } - public void testStartSequenceForReplicaRecovery() throws Exception { try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { @@ -146,20 +81,23 @@ public void testStartSequenceForReplicaRecovery() throws Exception { null ); shards.addReplica(newReplicaShard); + AtomicBoolean assertDone = new AtomicBoolean(false); shards.recoverReplica(newReplicaShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener) { @Override public IndexShard indexShard() { IndexShard idxShard = super.indexShard(); - // verify the starting sequence number while recovering a failed shard which has a valid last commit - long startingSeqNo = -1; - try { - startingSeqNo = Long.parseLong( - idxShard.store().readLastCommittedSegmentsInfo().getUserData().get(SequenceNumbers.MAX_SEQ_NO) - ); - } catch (IOException e) { - Assert.fail(); + if (assertDone.compareAndSet(false, true)) { + // verify the starting sequence number while recovering a failed shard which has a valid last commit + long startingSeqNo = -1; + try { + startingSeqNo = Long.parseLong( + idxShard.store().readLastCommittedSegmentsInfo().getUserData().get(SequenceNumbers.MAX_SEQ_NO) + ); + } catch (IOException e) { + Assert.fail(); + } + assertEquals(numDocs - 1, startingSeqNo); } - assertEquals(numDocs - 1, startingSeqNo); return idxShard; } }); @@ -169,4 +107,35 @@ public IndexShard indexShard() { shards.assertAllEqual(numDocs + moreDocs); } } + + public void testNoTranslogHistoryTransferred() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + + // Step1 - Start primary, index docs, flush, index more docs, check translog in primary as expected + shards.startPrimary(); + final IndexShard primary = shards.getPrimary(); + int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + shards.flush(); + List<DocIdSeqNoAndSource> docIdAndSeqNosAfterFlush = getDocIdAndSeqNos(primary); + int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); + assertEquals(moreDocs, getTranslog(primary).totalOperations()); + + // Step 2 - Start replica, recovery happens, check docs recovered till last flush + final IndexShard replica = shards.addReplica(); + shards.startAll(); + assertEquals(docIdAndSeqNosAfterFlush, getDocIdAndSeqNos(replica)); + assertDocCount(replica, numDocs); + assertEquals(NRTReplicationEngine.class, replica.getEngine().getClass()); + + // Step 3 - Check replica's translog has no operations + assertEquals(WriteOnlyTranslogManager.class, replica.getEngine().translogManager().getClass()); + WriteOnlyTranslogManager replicaTranslogManager = (WriteOnlyTranslogManager) replica.getEngine().translogManager(); + assertEquals(0, replicaTranslogManager.getTranslog().totalOperations()); + + // Adding this for close to succeed + shards.flush(); + replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs + moreDocs); + } + } } diff --git a/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java b/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java index 0277e420f74ed..2d15eb3ae8497 100644 --- a/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java +++ b/server/src/test/java/org/opensearch/index/translog/SnapshotMatchers.java @@ -68,7 +68,7 @@ public static Matcher<Translog.Snapshot> equalsTo(Translog.Operation... ops) { * Consumes a snapshot and make sure it's content is as expected */ public static Matcher<Translog.Snapshot> equalsTo(List<Translog.Operation> ops) { - return new EqualMatcher(ops.toArray(new Translog.Operation[ops.size()])); + return new EqualMatcher(ops.toArray(new Translog.Operation[0])); } public static Matcher<Translog.Snapshot> containsOperationsInAnyOrder(Collection<Translog.Operation> expectedOperations) { diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 7fe17e570d157..7cd57786df054 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -357,7 +357,7 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m ); nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}, transportService); + joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}); } public ClusterState createIndex(ClusterState state, CreateIndexRequest request) { diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 22481b5a7b99f..ce4142363a54e 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -338,7 +338,7 @@ public ClusterState randomInitialClusterState( for (int i = 0; i < randomIntBetween(2, 5); i++) { allNodes.add(createNode()); } - ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); // add nodes to clusterStateServiceMap updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); return state; @@ -420,7 +420,7 @@ public ClusterState randomlyUpdateClusterState( indicesToDelete.add(state.metadata().index(index).getIndex().getName()); } if (indicesToDelete.isEmpty() == false) { - DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new
String[indicesToDelete.size()])); + DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[0])); state = cluster.deleteIndices(state, deleteRequest); for (String index : indicesToDelete) { assertFalse(state.metadata().hasIndex(index)); @@ -452,9 +452,7 @@ public ClusterState randomlyUpdateClusterState( } } if (indicesToUpdate.isEmpty() == false) { - UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest( - indicesToUpdate.toArray(new String[indicesToUpdate.size()]) - ); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[0])); Settings.Builder settings = Settings.builder(); if (containsClosedIndex == false) { settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2)); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java similarity index 97% rename from server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java rename to server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java index 2b5550b71a627..7761f97769440 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/LocalStorePeerRecoverySourceHandlerTests.java @@ -143,7 +143,10 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class RecoverySourceHandlerTests extends OpenSearchTestCase { +/** + * This covers test cases for {@link RecoverySourceHandler} and {@link LocalStorePeerRecoverySourceHandler}. + */ +public class LocalStorePeerRecoverySourceHandlerTests extends OpenSearchTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build() @@ -215,7 +218,7 @@ public void writeFileChunk( }); } }; - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( null, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, @@ -296,7 +299,7 @@ public void indexTranslogOperations( listener.onResponse(checkpointOnTarget.get()); } }; - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), threadPool, @@ -359,7 +362,7 @@ public void indexTranslogOperations( } } }; - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), threadPool, @@ -433,7 +436,7 @@ public void indexTranslogOperations( Randomness.shuffle(operations); List skipOperations = randomSubsetOf(operations); Translog.Snapshot snapshot = newTranslogSnapshot(operations, skipOperations); - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, @@ -552,7 +555,7 @@ public void writeFileChunk( failedEngine.set(true); return null; }).when(mockShard).failShard(any(), any()); - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new 
LocalStorePeerRecoverySourceHandler( mockShard, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, @@ -627,7 +630,7 @@ public void writeFileChunk( failedEngine.set(true); return null; }).when(mockShard).failShard(any(), any()); - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( mockShard, new AsyncRecoveryTarget(target, recoveryExecutor), threadPool, @@ -680,7 +683,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final AtomicBoolean phase1Called = new AtomicBoolean(); final AtomicBoolean prepareTargetForTranslogCalled = new AtomicBoolean(); final AtomicBoolean phase2Called = new AtomicBoolean(); - final RecoverySourceHandler handler = new RecoverySourceHandler( + final RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, mock(RecoveryTargetHandler.class), threadPool, @@ -691,9 +694,15 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE ) { @Override - void phase1(IndexCommit snapshot, long startingSeqNo, IntSupplier translogOps, ActionListener<SendFileResult> listener) { + void phase1( + IndexCommit snapshot, + long startingSeqNo, + IntSupplier translogOps, + ActionListener<SendFileResult> listener, + boolean skipCreateRetentionLeaseStep + ) { phase1Called.set(true); - super.phase1(snapshot, startingSeqNo, translogOps, listener); + super.phase1(snapshot, startingSeqNo, translogOps, listener, skipCreateRetentionLeaseStep); } @Override @@ -786,7 +795,7 @@ public void writeFileChunk( }; final int maxConcurrentChunks = between(1, 8); final int chunkSize = between(1, 32); - final RecoverySourceHandler handler = new RecoverySourceHandler( + final RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, recoveryTarget, threadPool, @@ -859,7 +868,7 @@ public void writeFileChunk( }; final int maxConcurrentChunks = between(1, 4); final int chunkSize = between(1, 16); - final RecoverySourceHandler handler = new RecoverySourceHandler( + final RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( null, new AsyncRecoveryTarget(recoveryTarget, recoveryExecutor), threadPool, @@ -967,7 +976,7 @@ public void cleanFiles( } }; final StartRecoveryRequest startRecoveryRequest = getStartRecoveryRequest(); - final RecoverySourceHandler handler = new RecoverySourceHandler( + final RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, recoveryTarget, threadPool, @@ -993,7 +1002,7 @@ void createRetentionLease(long startingSeqNo, ActionListener<RetentionLease> lis final StepListener<RecoverySourceHandler.SendFileResult> phase1Listener = new StepListener<>(); try { final CountDownLatch latch = new CountDownLatch(1); - handler.phase1(DirectoryReader.listCommits(dir).get(0), 0, () -> 0, new LatchedActionListener<>(phase1Listener, latch)); + handler.phase1(DirectoryReader.listCommits(dir).get(0), 0, () -> 0, new LatchedActionListener<>(phase1Listener, latch), false); latch.await(); phase1Listener.result(); } catch (Exception e) { @@ -1006,7 +1015,7 @@ void createRetentionLease(long startingSeqNo, ActionListener<RetentionLease> lis public void testVerifySeqNoStatsWhenRecoverWithSyncId() throws Exception { IndexShard shard = mock(IndexShard.class); when(shard.state()).thenReturn(IndexShardState.STARTED); - RecoverySourceHandler handler = new RecoverySourceHandler( + RecoverySourceHandler handler = new LocalStorePeerRecoverySourceHandler( shard, new TestRecoveryTargetHandler(), threadPool, @@ -1061,7 +1070,7 @@ private Store newStore(Path path) throws IOException { }
private Store newStore(Path path, boolean checkIndex) throws IOException { - BaseDirectoryWrapper baseDirectoryWrapper = RecoverySourceHandlerTests.newFSDirectory(path); + BaseDirectoryWrapper baseDirectoryWrapper = LocalStorePeerRecoverySourceHandlerTests.newFSDirectory(path); if (checkIndex == false) { baseDirectoryWrapper.setCheckIndexOnClose(false); // don't run checkindex we might corrupt the index in these tests } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java new file mode 100644 index 0000000000000..91953d4db3495 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -0,0 +1,69 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.recovery; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; +import org.opensearch.index.seqno.ReplicationTracker; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.replication.common.ReplicationType; + +public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLevelReplicationTestCase { + + private static final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, "true") + .build(); + + public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + + // Step1 - Start primary, index docs and flush + shards.startPrimary(); + final IndexShard primary = shards.getPrimary(); + int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + shards.flush(); + + // Step 2 - Start replica for recovery to happen, check both has same number of docs + final IndexShard replica1 = shards.addReplica(); + shards.startAll(); + assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); + + // Step 3 - Index more docs, run segment replication, check both have same number of docs + int moreDocs = shards.indexDocs(randomIntBetween(10, 100)); + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); + + // Step 4 - Check both shard has expected number of doc count + assertDocCount(primary, numDocs + moreDocs); + assertDocCount(replica1, numDocs + moreDocs); + + // Step 5 - Check retention lease does not exist for the replica shard + assertEquals(1, primary.getRetentionLeases().leases().size()); + assertFalse(primary.getRetentionLeases().contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replica1.routingEntry()))); + + // Step 6 - Start new replica, recovery happens, and check that new replica has docs upto last flush + final IndexShard replica2 = shards.addReplica(); + shards.startAll(); + assertDocCount(replica2, numDocs); + + // Step 7 - Segment replication, check all shards have same number of docs + 
replicateSegments(primary, shards.getReplicas()); + shards.assertAllEqual(numDocs + moreDocs); + + // Step 8 - Check retention lease does not exist for the replica shard + assertEquals(1, primary.getRetentionLeases().leases().size()); + assertFalse(primary.getRetentionLeases().contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replica2.routingEntry()))); + } + } +} diff --git a/server/src/test/java/org/opensearch/indices/store/IndicesStoreTests.java b/server/src/test/java/org/opensearch/indices/store/IndicesStoreTests.java index a3130400719ad..d50145b08ee3d 100644 --- a/server/src/test/java/org/opensearch/indices/store/IndicesStoreTests.java +++ b/server/src/test/java/org/opensearch/indices/store/IndicesStoreTests.java @@ -56,7 +56,7 @@ public class IndicesStoreTests extends OpenSearchTestCase { Set<ShardRoutingState> set = new HashSet<>(); set.addAll(Arrays.asList(ShardRoutingState.values())); set.remove(ShardRoutingState.STARTED); - NOT_STARTED_STATES = set.toArray(new ShardRoutingState[set.size()]); + NOT_STARTED_STATES = set.toArray(new ShardRoutingState[0]); } private DiscoveryNode localNode; diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index ae7f795f57ee7..3a6743a334566 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -319,7 +319,7 @@ public void testCreatePitMoreThanMaxOpenPitContexts() throws Exception { final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); validatePitStats("index", maxPitContexts, 0, 0); // deleteall - DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[pitIds.size()])); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds.toArray(new String[0])); /** * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java index 94fb6cded637d..050965b37c068 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregationsTests.java @@ -101,7 +101,6 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -116,60 +115,56 @@ * */ public class AggregationsTests extends OpenSearchTestCase { - private static final List<InternalAggregationTestCase<?>> aggsTests = getAggsTests(); - - private static List<InternalAggregationTestCase<?>> getAggsTests() { - List<InternalAggregationTestCase<?>> aggsTests = new ArrayList<>(); - aggsTests.add(new InternalCardinalityTests()); - aggsTests.add(new InternalTDigestPercentilesTests()); - aggsTests.add(new InternalTDigestPercentilesRanksTests()); - aggsTests.add(new InternalHDRPercentilesTests()); - aggsTests.add(new InternalHDRPercentilesRanksTests()); - aggsTests.add(new InternalPercentilesBucketTests()); - aggsTests.add(new InternalMinTests()); - aggsTests.add(new InternalMaxTests()); - aggsTests.add(new InternalAvgTests()); - aggsTests.add(new InternalWeightedAvgTests()); - aggsTests.add(new InternalSumTests()); - aggsTests.add(new InternalValueCountTests()); - aggsTests.add(new InternalSimpleValueTests()); - aggsTests.add(new InternalDerivativeTests()); - aggsTests.add(new InternalBucketMetricValueTests()); - aggsTests.add(new InternalStatsTests()); - aggsTests.add(new InternalStatsBucketTests()); - aggsTests.add(new InternalExtendedStatsTests()); - aggsTests.add(new InternalExtendedStatsBucketTests()); - aggsTests.add(new InternalGeoCentroidTests()); - aggsTests.add(new InternalHistogramTests()); - aggsTests.add(new InternalDateHistogramTests()); - aggsTests.add(new InternalAutoDateHistogramTests()); - aggsTests.add(new InternalVariableWidthHistogramTests()); - aggsTests.add(new LongTermsTests()); - aggsTests.add(new DoubleTermsTests()); - aggsTests.add(new StringTermsTests()); - aggsTests.add(new LongRareTermsTests()); - aggsTests.add(new StringRareTermsTests()); - aggsTests.add(new InternalMissingTests()); - aggsTests.add(new InternalNestedTests()); - aggsTests.add(new InternalReverseNestedTests()); - aggsTests.add(new InternalGlobalTests()); - aggsTests.add(new InternalFilterTests()); - aggsTests.add(new InternalSamplerTests()); - aggsTests.add(new InternalRangeTests()); - aggsTests.add(new InternalDateRangeTests()); - aggsTests.add(new InternalGeoDistanceTests()); - aggsTests.add(new InternalFiltersTests()); - aggsTests.add(new InternalAdjacencyMatrixTests()); - aggsTests.add(new SignificantLongTermsTests()); - aggsTests.add(new SignificantStringTermsTests()); - aggsTests.add(new InternalScriptedMetricTests()); - aggsTests.add(new InternalBinaryRangeTests()); - aggsTests.add(new InternalTopHitsTests()); - aggsTests.add(new InternalCompositeTests()); - aggsTests.add(new InternalMedianAbsoluteDeviationTests()); - aggsTests.add(new InternalMultiTermsTests()); - return Collections.unmodifiableList(aggsTests); - } + private static final List<InternalAggregationTestCase<?>> aggsTests = List.of( + new InternalCardinalityTests(), + new InternalTDigestPercentilesTests(), + new InternalTDigestPercentilesRanksTests(), + new InternalHDRPercentilesTests(), + new InternalHDRPercentilesRanksTests(), + new InternalPercentilesBucketTests(), + new InternalMinTests(), + new InternalMaxTests(), + new
InternalAvgTests(), + new InternalWeightedAvgTests(), + new InternalSumTests(), + new InternalValueCountTests(), + new InternalSimpleValueTests(), + new InternalDerivativeTests(), + new InternalBucketMetricValueTests(), + new InternalStatsTests(), + new InternalStatsBucketTests(), + new InternalExtendedStatsTests(), + new InternalExtendedStatsBucketTests(), + new InternalGeoCentroidTests(), + new InternalHistogramTests(), + new InternalDateHistogramTests(), + new InternalAutoDateHistogramTests(), + new InternalVariableWidthHistogramTests(), + new LongTermsTests(), + new DoubleTermsTests(), + new StringTermsTests(), + new LongRareTermsTests(), + new StringRareTermsTests(), + new InternalMissingTests(), + new InternalNestedTests(), + new InternalReverseNestedTests(), + new InternalGlobalTests(), + new InternalFilterTests(), + new InternalSamplerTests(), + new InternalRangeTests(), + new InternalDateRangeTests(), + new InternalGeoDistanceTests(), + new InternalFiltersTests(), + new InternalAdjacencyMatrixTests(), + new SignificantLongTermsTests(), + new SignificantStringTermsTests(), + new InternalScriptedMetricTests(), + new InternalBinaryRangeTests(), + new InternalTopHitsTests(), + new InternalCompositeTests(), + new InternalMedianAbsoluteDeviationTests(), + new InternalMultiTermsTests() + ); @Override protected NamedXContentRegistry xContentRegistry() { @@ -226,7 +221,7 @@ public void testFromXContentWithRandomFields() throws IOException { private void parseAndAssert(boolean addRandomFields) throws IOException { XContentType xContentType = randomFrom(XContentType.values()); final ToXContent.Params params = new ToXContent.MapParams(singletonMap(RestSearchAction.TYPED_KEYS_PARAM, "true")); - Aggregations aggregations = createTestInstance(); + Aggregations aggregations = createTestInstance(1, 0, 3); BytesReference originalBytes = toShuffledXContent(aggregations, xContentType, params, randomBoolean()); BytesReference mutated; if (addRandomFields) { diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/AggregationPathTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/AggregationPathTests.java index a45f890216bce..4f88e5ca55b4c 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/AggregationPathTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/AggregationPathTests.java @@ -99,7 +99,7 @@ Tokens add(String name, String key) { } AggregationPath.PathElement[] toArray() { - return tokens.toArray(new AggregationPath.PathElement[tokens.size()]); + return tokens.toArray(new AggregationPath.PathElement[0]); } } } diff --git a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java index 89834f80b59df..13ca6c6f918ea 100644 --- a/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/subphase/highlight/HighlightBuilderTests.java @@ -760,7 +760,7 @@ private static String[] randomStringArray(int minSize, int maxSize) { for (int f = 0; f < size; f++) { randomStrings.add(randomAlphaOfLengthBetween(3, 10)); } - return randomStrings.toArray(new String[randomStrings.size()]); + return randomStrings.toArray(new String[0]); } /** diff --git a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java 
b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java index bcf458c5028cd..7dff005dc4f0a 100644 --- a/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/FieldSortBuilderTests.java @@ -707,7 +707,7 @@ protected void assertWarnings(FieldSortBuilder testItem) { expectedWarnings.add(nestedPathDeprecationWarning); } if (expectedWarnings.isEmpty() == false) { - assertWarnings(expectedWarnings.toArray(new String[expectedWarnings.size()])); + assertWarnings(expectedWarnings.toArray(new String[0])); assertedWarnings.addAll(expectedWarnings); } } diff --git a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java index 87adbd9532665..4650e945f42a9 100644 --- a/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/GeoDistanceSortBuilderTests.java @@ -419,7 +419,7 @@ protected void assertWarnings(GeoDistanceSortBuilder testItem) { expectedWarnings.add(nestedPathDeprecationWarning); } if (expectedWarnings.isEmpty() == false) { - assertWarnings(expectedWarnings.toArray(new String[expectedWarnings.size()])); + assertWarnings(expectedWarnings.toArray(new String[0])); assertedWarnings.addAll(expectedWarnings); } } diff --git a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java index 36acb1ba2f3e0..84f01c50f706b 100644 --- a/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/sort/SortBuilderTests.java @@ -202,7 +202,7 @@ public void testRandomSortBuilders() throws IOException { assertEquals(iterator.next(), parsedBuilder); } if (expectedWarningHeaders.size() > 0) { - assertWarnings(expectedWarningHeaders.toArray(new String[expectedWarningHeaders.size()])); + assertWarnings(expectedWarningHeaders.toArray(new String[0])); assertedWarnings.addAll(expectedWarningHeaders); } } diff --git a/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java b/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java index 96a5acf7e0b3d..511cf7c3e9ce9 100644 --- a/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java +++ b/server/src/test/java/org/opensearch/test/hamcrest/OpenSearchGeoAssertions.java @@ -99,7 +99,7 @@ private static int next(int top, Coordinate... 
points) { } private static Coordinate[] fixedOrderedRing(List<Coordinate> coordinates, boolean direction) { - return fixedOrderedRing(coordinates.toArray(new Coordinate[coordinates.size()]), direction); + return fixedOrderedRing(coordinates.toArray(new Coordinate[0]), direction); } private static Coordinate[] fixedOrderedRing(Coordinate[] points, boolean direction) { diff --git a/server/src/test/java/org/opensearch/threadpool/UpdateThreadPoolSettingsTests.java b/server/src/test/java/org/opensearch/threadpool/UpdateThreadPoolSettingsTests.java index 419b100558f65..44c5937894828 100644 --- a/server/src/test/java/org/opensearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/server/src/test/java/org/opensearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -248,7 +248,7 @@ public void testCustomThreadPool() throws Exception { private String randomThreadPoolName() { Set<String> threadPoolNames = ThreadPool.THREAD_POOL_TYPES.keySet(); - return randomFrom(threadPoolNames.toArray(new String[threadPoolNames.size()])); + return randomFrom(threadPoolNames.toArray(new String[0])); } } diff --git a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java index 5089368776ad6..9dcb5a07030cf 100644 --- a/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/opensearch/action/support/replication/ClusterStateCreationUtils.java @@ -460,7 +460,7 @@ private static DiscoveryNode newNode(int nodeId) { } private static String selectAndRemove(Set<String> strings) { - String selection = randomFrom(strings.toArray(new String[strings.size()])); + String selection = randomFrom(strings.toArray(new String[0])); strings.remove(selection); return selection; } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index f520206e0f866..f874ab44d9d3b 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -67,6 +67,7 @@ import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.internal.io.IOUtils; @@ -104,6 +105,7 @@ import org.opensearch.indices.recovery.RecoveryResponse; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoverySourceHandler; +import org.opensearch.indices.recovery.RecoverySourceHandlerFactory; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; @@ -132,8 +134,8 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -861,17 +863,23 @@ protected final void recoverUnstartedReplica( recoveryTarget, startingSeqNo ); - int fileChunkSizeInBytes = Math.toIntExact( - randomBoolean() ? RecoverySettings.DEFAULT_CHUNK_SIZE.getBytes() : randomIntBetween(1, 10 * 1024 * 1024) + long fileChunkSizeInBytes = randomBoolean() + ? RecoverySettings.DEFAULT_CHUNK_SIZE.getBytes() + : randomIntBetween(1, 10 * 1024 * 1024); + final Settings settings = Settings.builder() + .put("indices.recovery.max_concurrent_file_chunks", Integer.toString(between(1, 4))) + .put("indices.recovery.max_concurrent_operations", Integer.toString(between(1, 4))) + .build(); + RecoverySettings recoverySettings = new RecoverySettings( + settings, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - final RecoverySourceHandler recovery = new RecoverySourceHandler( + recoverySettings.setChunkSize(new ByteSizeValue(fileChunkSizeInBytes)); + final RecoverySourceHandler recovery = RecoverySourceHandlerFactory.create( primary, new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), - threadPool, request, - fileChunkSizeInBytes, - between(1, 8), - between(1, 8) + recoverySettings ); primary.updateShardState( primary.routingEntry(), diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index f2b713852584b..c5f4e171e0cf2 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -309,7 +309,7 @@ public void testSnapshotAndRestore() throws Exception { List<String> deleteIndices = randomSubsetOf(randomIntBetween(0, indexCount), indexNames); if (deleteIndices.size() > 0) { logger.info("--> delete indices {}", deleteIndices); - assertAcked(client().admin().indices().prepareDelete(deleteIndices.toArray(new String[deleteIndices.size()]))); + assertAcked(client().admin().indices().prepareDelete(deleteIndices.toArray(new String[0]))); } Set<String> closeIndices = new HashSet<>(Arrays.asList(indexNames)); @@ -335,7 +335,7 @@ public void testSnapshotAndRestore() throws Exception { // Wait for green so the close does not fail in the edge case of coinciding with a shard recovery that hasn't fully synced yet ensureGreen(); logger.info("--> close indices {}", closeIndices); - assertAcked(client().admin().indices().prepareClose(closeIndices.toArray(new String[closeIndices.size()]))); + assertAcked(client().admin().indices().prepareClose(closeIndices.toArray(new String[0]))); } logger.info("--> restore all indices from the snapshot"); diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 4863880d38052..685bb45ca2050 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -1743,7 +1743,7 @@ public InetSocketAddress[] httpAddresses() { for (HttpServerTransport httpServerTransport : getInstances(HttpServerTransport.class)) { addresses.add(httpServerTransport.boundAddress().publishAddress().address()); } - return addresses.toArray(new InetSocketAddress[addresses.size()]); + return addresses.toArray(new InetSocketAddress[0]); } /** diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index 94aae206c2eb3..a100aa1c9fe42 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -778,7 +778,7 @@ public final void createIndex(String... names) {
             success = true;
         } finally {
             if (!success && !created.isEmpty()) {
-                cluster().wipeIndices(created.toArray(new String[created.size()]));
+                cluster().wipeIndices(created.toArray(new String[0]));
             }
         }
     }
@@ -2317,7 +2317,7 @@ protected static RestClient createRestClient(
                 hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort()));
             }
         }
-        RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[hosts.size()]));
+        RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[0]));
         if (httpClientConfigCallback != null) {
             builder.setHttpClientConfigCallback(httpClientConfigCallback);
         }
diff --git a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java
index 5945ac01b4547..c2d36a5426fd0 100644
--- a/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java
+++ b/test/framework/src/main/java/org/opensearch/test/XContentTestUtils.java
@@ -293,7 +293,7 @@ static List<String> getInsertPaths(XContentParser parser, Stack<String> currentPath) throws IOException {
             currentPath.push(parser.currentName().replaceAll("\\.", "\\\\."));
         }
         if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
-            validPaths.add(String.join(".", currentPath.toArray(new String[currentPath.size()])));
+            validPaths.add(String.join(".", currentPath.toArray(new String[0])));
             while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
                 if (parser.currentToken() == XContentParser.Token.START_OBJECT
                     || parser.currentToken() == XContentParser.Token.START_ARRAY) {
diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java
index 16d44d1f8eeb4..c2032e25b06a5 100644
--- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java
+++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java
@@ -272,10 +272,7 @@ public static void assertSearchHits(SearchResponse searchResponse, String... ids) {
             );
         }
         assertThat(
-            "Some expected ids were not found in search results: "
-                + Arrays.toString(idsSet.toArray(new String[idsSet.size()]))
-                + "."
-                + shardStatus,
+            "Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[0])) + "." + shardStatus,
             idsSet.size(),
             equalTo(0)
         );
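In the XContentTestUtils hunk above, note the line just before the changed toArray call: literal dots in field names are escaped before the path segments are joined with ".", so a field named "foo.bar" cannot be confused with a two-segment path. A standalone illustration of that escaping, not from the PR:

    // "foo.bar" -> "foo\.bar" before joining, so the separator stays unambiguous.
    String fieldName = "foo.bar";
    String escaped = fieldName.replaceAll("\\.", "\\\\.");
    String path = String.join(".", "root", escaped, "leaf");
    System.out.println(path); // prints: root.foo\.bar.leaf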
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index ec5a617adbde9..a353f53ab1bb3 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -207,8 +207,8 @@ public void initClient() throws IOException {
         }
         clusterHosts = unmodifiableList(hosts);
         logger.info("initializing REST clients against {}", clusterHosts);
-        client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
-        adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()]));
+        client = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0]));
+        adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[0]));

         nodeVersions = new TreeSet<>();
         Map<?, ?> response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins")));
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
index 13ede9d44f1ad..849d7e4685a76 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
@@ -59,6 +59,8 @@
 import java.io.UncheckedIOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -193,7 +195,7 @@ public ClientYamlTestResponse callApi(
             String contentType = entity.getContentType();
             // randomly test the GET with source param instead of GET/POST with body
             try {
-                if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) {
+                if (sendBodyAsSourceParam(supportedMethods, contentType, entity)) {
                     logger.debug("sending the request body as source param with GET method");
                     queryStringParams.put("source", EntityUtils.toString(entity));
                     queryStringParams.put("source_content_type", contentType);
@@ -253,11 +255,13 @@ protected static void setOptions(Request request, Map<String, String> headers) {
         request.setOptions(options);
     }

-    private static boolean sendBodyAsSourceParam(List<String> supportedMethods, String contentType, long contentLength) {
+    private static boolean sendBodyAsSourceParam(List<String> supportedMethods, String contentType, HttpEntity entity) throws IOException,
+        ParseException {
         if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) {
             // The API doesn't claim to support GET anyway
             return false;
         }
+        long contentLength = entity.getContentLength();
         if (contentLength < 0) {
             // Negative length means "unknown" or "huge" in this case. Either way we can't send it as a parameter
             return false;
@@ -271,7 +275,18 @@ private static boolean sendBodyAsSourceParam(List<String> supportedMethods, String contentType,
             // We can only encode JSON or YAML this way.
             return false;
         }
-        return RandomizedTest.rarely();
+
+        return RandomizedTest.rarely() && isUrlEncodedLengthUnderLimit(entity);
+    }
+
+    /*
+     * There is a limit of 4096 bytes for the HTTP line; beyond it the request fails with too_long_http_line_exception.
+     * We check that the length of the URL-encoded source parameter is under 3000, leaving the remainder for the
+     * URL and other params.
+     */
+    private static boolean isUrlEncodedLengthUnderLimit(HttpEntity entity) throws IOException, ParseException {
+        String encoded = URLEncoder.encode(EntityUtils.toString(entity), StandardCharsets.UTF_8);
+        return encoded.length() < 3000;
     }

     private ClientYamlSuiteRestApi restApi(String apiName) {
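The new isUrlEncodedLengthUnderLimit guard measures the encoded length rather than the raw content length because URL-encoding inflates a body: every brace, quote, and colon in a JSON payload becomes a three-character %XX escape, so a symbol-heavy body can nearly double in size. A quick standalone check of the arithmetic, not from the PR:

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;

    String body = "{\"query\":{\"match_all\":{}}}";
    String encoded = URLEncoder.encode(body, StandardCharsets.UTF_8);
    // Prints the raw vs. encoded lengths; the encoded form is much longer.
    System.out.println(body.length() + " -> " + encoded.length());
    boolean underLimit = encoded.length() < 3000; // same threshold as the diff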
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
index c80b120ad0148..0324ef34c4958 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java
@@ -257,7 +257,7 @@ private static TransportAddress[] extractTransportAddresses(TransportService transportService) {
         BoundTransportAddress boundTransportAddress = transportService.boundAddress();
         transportAddresses.addAll(Arrays.asList(boundTransportAddress.boundAddresses()));
         transportAddresses.add(boundTransportAddress.publishAddress());
-        return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]);
+        return transportAddresses.toArray(new TransportAddress[0]);
     }

     @Override
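For context on the ClientYamlTestClient change two files above: when the length guard passes, the yaml client rewrites a request-with-body as a plain GET whose body travels in the source query parameter (the "source" and "source_content_type" names come straight from the callApi hunk). A hypothetical low-level equivalent using the REST client seen in OpenSearchRestTestCase; the endpoint and query here are illustrative only:

    // Hypothetical sketch of the GET-with-source fallback the yaml client randomizes.
    Request request = new Request("GET", "/_search");
    request.addParameter("source", "{\"query\":{\"match_all\":{}}}");
    request.addParameter("source_content_type", "application/json");
    Response response = client.performRequest(request);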