Merge remote-tracking branch 'origin/main' into pull-request-checks
Signed-off-by: Peter Nied <petern@amazon.com>
peternied committed Oct 23, 2023
2 parents ce61acb + da85124 commit 21a9a26
Showing 102 changed files with 2,734 additions and 419 deletions.
5 changes: 4 additions & 1 deletion CHANGELOG.md
@@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- [Remote cluster state] Upload global metadata in cluster state to remote store([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
- [Remote cluster state] Download functionality of global metadata from remote store ([#10535](https://github.com/opensearch-project/OpenSearch/pull/10535))
- [Remote cluster state] Restore global metadata from remote store when local state is lost after quorum loss ([#10404](https://github.com/opensearch-project/OpenSearch/pull/10404))
- [AdmissionControl] Added changes for AdmissionControl Interceptor and AdmissionControlService for RateLimiting ([#9286](https://github.com/opensearch-project/OpenSearch/pull/9286))

### Dependencies
- Bump `log4j-core` from 2.18.0 to 2.19.0
@@ -93,6 +94,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- [Remote Store] Add repository stats for remote store([#10567](https://github.com/opensearch-project/OpenSearch/pull/10567))
- Add search query categorizer ([#10255](https://github.com/opensearch-project/OpenSearch/pull/10255))
- Introduce ConcurrentQueryProfiler to profile query using concurrent segment search path and support concurrency during rewrite and create weight ([10352](https://github.com/opensearch-project/OpenSearch/pull/10352))
- [Remote cluster state] Make index and global metadata upload timeout dynamic cluster settings ([#10814](https://github.com/opensearch-project/OpenSearch/pull/10814))

### Dependencies
- Bump `com.google.api.grpc:proto-google-common-protos` from 2.10.0 to 2.25.1 ([#10208](https://github.com/opensearch-project/OpenSearch/pull/10208), [#10298](https://github.com/opensearch-project/OpenSearch/pull/10298))
@@ -105,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `org.codehaus.woodstox:stax2-api` from 4.2.1 to 4.2.2 ([#10639](https://github.com/opensearch-project/OpenSearch/pull/10639))
- Bump `com.google.http-client:google-http-client` from 1.43.2 to 1.43.3 ([#10635](https://github.com/opensearch-project/OpenSearch/pull/10635))
- Bump `com.squareup.okio:okio` from 3.5.0 to 3.6.0 ([#10637](https://github.com/opensearch-project/OpenSearch/pull/10637))
- Bump `org.apache.logging.log4j:log4j-core` from 2.20.0 to 2.21.0 ([#10858](https://github.com/opensearch-project/OpenSearch/pull/10858))

### Changed
- Mute the query profile IT with concurrent execution ([#9840](https://github.com/opensearch-project/OpenSearch/pull/9840))
@@ -128,4 +131,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Security

[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x
[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.12...2.x
@@ -15,8 +15,9 @@ plugins {
repositories {
mavenCentral()
}

dependencies {
implementation "org.apache.logging.log4j:log4j-core:2.20.0"
implementation "org.apache.logging.log4j:log4j-core:2.21.0"
}

["0.0.1", "0.0.2"].forEach { v ->
2 changes: 1 addition & 1 deletion buildSrc/version.properties
@@ -14,7 +14,7 @@ jackson_databind = 2.15.2
snakeyaml = 2.1
icu4j = 70.1
supercsv = 2.4.0
log4j = 2.20.0
log4j = 2.21.0
slf4j = 1.7.36
asm = 9.6
jettison = 1.5.4
1 change: 0 additions & 1 deletion libs/core/licenses/log4j-api-2.20.0.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions libs/core/licenses/log4j-api-2.21.0.jar.sha1
@@ -0,0 +1 @@
760192f2b69eacf4a4afc78e5a1d7a8de054fcbd
1 change: 0 additions & 1 deletion plugins/crypto-kms/licenses/log4j-1.2-api-2.20.0.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions plugins/crypto-kms/licenses/log4j-1.2-api-2.21.0.jar.sha1
@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152

This file was deleted.

@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152

This file was deleted.

@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152

This file was deleted.

@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152

This file was deleted.

@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152

This file was deleted.

@@ -0,0 +1 @@
911fdb5b1a1df36719c579ecc6f2957b88bce1ab

This file was deleted.

@@ -0,0 +1 @@
12bad3819a9570807f3c97315930699584c12152
5 changes: 5 additions & 0 deletions qa/os/build.gradle
@@ -70,6 +70,11 @@ tasks.dependenciesInfo.enabled = false

tasks.thirdPartyAudit.ignoreMissingClasses()

// The UnsafeUtil classes added in log4j-core 2.21.0 reach into JDK internals
// (sun.misc.Unsafe), which the third-party audit flags; suppress just these two.
tasks.thirdPartyAudit.ignoreViolations(
'org.apache.logging.log4j.core.util.internal.UnsafeUtil',
'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1'
)

tasks.register('destructivePackagingTest') {
dependsOn 'destructiveDistroTest'
}
4 changes: 3 additions & 1 deletion server/build.gradle
@@ -364,7 +364,9 @@ tasks.named("thirdPartyAudit").configure {
'com.google.protobuf.UnsafeUtil$Android32MemoryAccessor',
'com.google.protobuf.UnsafeUtil$Android64MemoryAccessor',
'com.google.protobuf.UnsafeUtil$JvmMemoryAccessor',
'com.google.protobuf.UnsafeUtil$MemoryAccessor'
'com.google.protobuf.UnsafeUtil$MemoryAccessor',
'org.apache.logging.log4j.core.util.internal.UnsafeUtil',
'org.apache.logging.log4j.core.util.internal.UnsafeUtil$1'
)
}

1 change: 0 additions & 1 deletion server/licenses/log4j-api-2.20.0.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions server/licenses/log4j-api-2.21.0.jar.sha1
@@ -0,0 +1 @@
760192f2b69eacf4a4afc78e5a1d7a8de054fcbd
1 change: 0 additions & 1 deletion server/licenses/log4j-core-2.20.0.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions server/licenses/log4j-core-2.21.0.jar.sha1
@@ -0,0 +1 @@
122e1a9e0603cc9eae07b0846a6ff01f2454bc49
1 change: 0 additions & 1 deletion server/licenses/log4j-jul-2.20.0.jar.sha1

This file was deleted.

1 change: 1 addition & 0 deletions server/licenses/log4j-jul-2.21.0.jar.sha1
@@ -0,0 +1 @@
f0da61113f4a47654677e6a98b1e13ca7de2483d
@@ -37,6 +37,7 @@ public AcknowledgedResponse createDataStream(String name) throws Exception {
CreateDataStreamAction.Request request = new CreateDataStreamAction.Request(name);
AcknowledgedResponse response = client().admin().indices().createDataStream(request).get();
assertThat(response.isAcknowledged(), is(true));
performRemoteStoreTestAction();
return response;
}

@@ -67,6 +68,7 @@ public RolloverResponse rolloverDataStream(String name) throws Exception {
RolloverResponse response = client().admin().indices().rolloverIndex(request).get();
assertThat(response.isAcknowledged(), is(true));
assertThat(response.isRolledOver(), is(true));
performRemoteStoreTestAction();
return response;
}

@@ -109,5 +111,4 @@ public AcknowledgedResponse deleteIndexTemplate(String name) throws Exception {
assertThat(response.isAcknowledged(), is(true));
return response;
}

}
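The added performRemoteStoreTestAction() calls follow a hook pattern: the shared data-stream helpers call a method that remote-store test variants override to exercise the remote store after each action. The hook's definition is outside this diff, so the following Java sketch of the arrangement is an assumption (the class name and no-op default are illustrative):

import org.opensearch.test.OpenSearchIntegTestCase;

// Sketch only; the real hook lives in the shared test infrastructure.
public abstract class DataStreamTestCaseSketch extends OpenSearchIntegTestCase {
    // No-op by default. Remote-store integration tests would override this to
    // flush or restore via the remote store after each data-stream action
    // (create, rollover) before the caller's assertions run.
    protected void performRemoteStoreTestAction() {}
}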
@@ -26,6 +26,7 @@
import org.opensearch.index.shard.IndexShard;
import org.opensearch.indices.IndicesService;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.junit.annotations.TestLogging;
import org.opensearch.test.transport.MockTransportService;
import org.opensearch.transport.TransportService;

@@ -55,6 +56,7 @@ private void createIndex(int replicaCount) {
* This test verifies the happy path when the primary shard is relocated to a newly added node (target) in the
* cluster. Documents are indexed before and after the relocation and are then verified.
*/
@TestLogging(reason = "Getting trace logs from replication,shard and allocation package", value = "org.opensearch.indices.replication:TRACE, org.opensearch.index.shard:TRACE, org.opensearch.cluster.routing.allocation:TRACE")
public void testPrimaryRelocation() throws Exception {
final String oldPrimary = internalCluster().startNode();
createIndex(1);
@@ -56,7 +56,7 @@ public void testWritesRejectedDueToBytesLagBreach() throws Exception {
public void testWritesRejectedDueToTimeLagBreach() throws Exception {
// Indexing initially happens with a doc size of 1 KB; then all remote store interactions start failing. The
// indexing then continues with a doc size of 1 byte, so the time lag limit is exceeded and writes are rejected.
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 15, "time_lag");
validateBackpressure(ByteSizeUnit.KB.toIntBytes(1), 20, ByteSizeUnit.BYTES.toIntBytes(1), 3, "time_lag");
}

private void validateBackpressure(
@@ -133,11 +133,13 @@ private RemoteSegmentTransferTracker.Stats stats() {
return matches.get(0).getSegmentStats();
}

private void indexDocAndRefresh(BytesReference source, int iterations) {
private void indexDocAndRefresh(BytesReference source, int iterations) throws InterruptedException {
for (int i = 0; i < iterations; i++) {
client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get();
refresh(INDEX_NAME);
}
// Pause briefly and index one final doc - presumably to give the remote store
// time-lag signal a chance to accrue before backpressure is evaluated.
Thread.sleep(250);
client().prepareIndex(INDEX_NAME).setSource(source, MediaTypeRegistry.JSON).get();
}

/**
@@ -9,6 +9,7 @@
package org.opensearch.remotestore;

import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.action.admin.indices.datastream.DataStreamRolloverIT;
import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest;
import org.opensearch.cluster.ClusterState;
@@ -21,16 +22,19 @@
import org.opensearch.gateway.remote.ClusterMetadataManifest;
import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata;
import org.opensearch.gateway.remote.RemoteClusterStateService;
import org.opensearch.test.InternalTestCluster;
import org.opensearch.test.OpenSearchIntegTestCase;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutionException;

import static org.opensearch.cluster.coordination.ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING;
import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_READ_ONLY_SETTING;
import static org.opensearch.cluster.metadata.Metadata.CLUSTER_READ_ONLY_BLOCK;
import static org.opensearch.cluster.metadata.Metadata.SETTING_READ_ONLY_SETTING;
@@ -93,6 +97,72 @@ public void testFullClusterRestore() throws Exception {
verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true);
}

/**
* This test covers the case where the full cluster restarts right after a remote state restore has been persisted
* to disk via LucenePersistedState. This is a special case for remote state: at that point the cluster uuid in the
* restored state is still ClusterState.UNKNOWN_UUID when we persist it to disk. After the restart, the local disk
* state will be read but should again be overridden with the remote state.
*
* 1. Form a cluster and index a few docs
* 2. Replace all nodes to remove all local disk state
* 3. Start a cluster manager node without correct seeding to ensure the local disk state is written with cluster uuid ClusterState.UNKNOWN_UUID but with the remote-restored Metadata
* 4. Restart the cluster manager node with correct seeding
* 5. After the restart, the cluster manager picks up the local disk state, which has the same Metadata as remote but whose cluster uuid is still ClusterState.UNKNOWN_UUID
* 6. The cluster manager will try to restore from remote again
* 7. Metadata loaded from the local disk state will be overridden with the remote Metadata and no conflict should arise
* 8. Add data nodes to recover index data
* 9. Verify Metadata and index data are restored
*/
public void testFullClusterRestoreDoesntFailWithConflictingLocalState() throws Exception {
int shardCount = randomIntBetween(1, 2);
int replicaCount = 1;
int dataNodeCount = shardCount * (replicaCount + 1);
int clusterManagerNodeCount = 1;

// index some data to generate files in remote directory
Map<String, Long> indexStats = initialTestSetup(shardCount, replicaCount, dataNodeCount, 1);
String prevClusterUUID = clusterService().state().metadata().clusterUUID();

// stop all nodes
internalCluster().stopAllNodes();

// start a cluster manager node with no cluster manager seeding.
// This should fail with IllegalStateException as cluster manager fails to form without any initial seed
assertThrows(
IllegalStateException.class,
() -> internalCluster().startClusterManagerOnlyNodes(
clusterManagerNodeCount,
Settings.builder()
.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey()) // disable seeding during bootstrapping
.build()
)
);

// verify cluster manager not elected
String newClusterUUID = clusterService().state().metadata().clusterUUID();
assert Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID)
: "Disabling Cluster manager seeding failed. cluster uuid is not unknown";

// restart cluster manager with correct seed
internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) {
return Settings.builder()
.putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) // Seed with correct Cluster Manager node
.build();
}
});

// validate new cluster state formed
newClusterUUID = clusterService().state().metadata().clusterUUID();
assert !Objects.equals(newClusterUUID, ClusterState.UNKNOWN_UUID) : "cluster restart not successful. cluster uuid is still unknown";
assert !Objects.equals(newClusterUUID, prevClusterUUID) : "cluster restart not successful. cluster uuid is same";
validateMetadata(List.of(INDEX_NAME));

// start data nodes to trigger index data recovery
internalCluster().startDataOnlyNodes(dataNodeCount);
verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, true);
}

public void testFullClusterRestoreMultipleIndices() throws Exception {
int shardCount = randomIntBetween(1, 2);
int replicaCount = 1;
Expand Down Expand Up @@ -222,6 +292,14 @@ private void validateCurrentMetadata() throws Exception {
});
}

public void testDataStreamPostRemoteStateRestore() throws Exception {
new DataStreamRolloverIT() {
protected boolean triggerRemoteStateRestore() {
return true;
}
}.testDataStreamRollover();
}

public void testFullClusterRestoreGlobalMetadata() throws Exception {
int shardCount = randomIntBetween(1, 2);
int replicaCount = 1;
@@ -233,8 +311,7 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception {
String prevClusterUUID = clusterService().state().metadata().clusterUUID();

// Create global metadata - register a custom repo
// TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691
// registerCustomRepository();
Path repoPath = registerCustomRepository();

// Create global metadata - persistent settings
updatePersistentSettings(Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), 34).build());
@@ -263,30 +340,36 @@ verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false);
verifyRedIndicesAndTriggerRestore(indexStats, INDEX_NAME, false);

// validate global metadata restored
verifyRestoredRepositories();
verifyRestoredRepositories(repoPath);
verifyRestoredIndexTemplate();
}

private void registerCustomRepository() {
private Path registerCustomRepository() {
Path path = randomRepoPath();
assertAcked(
client().admin()
.cluster()
.preparePutRepository("custom-repo")
.setType("fs")
.setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", false))
.setSettings(Settings.builder().put("location", path).put("compress", false))
.get()
);
return path;
}

private void verifyRestoredRepositories() {
private void verifyRestoredRepositories(Path repoPath) {
RepositoriesMetadata repositoriesMetadata = clusterService().state().metadata().custom(RepositoriesMetadata.TYPE);
assertEquals(2, repositoriesMetadata.repositories().size()); // includes remote store repo as well
assertEquals(3, repositoriesMetadata.repositories().size()); // includes remote store repo as well
assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings()));
assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings()));
// TODO - uncomment after all customs is also uploaded for all repos - https://github.com/opensearch-project/OpenSearch/issues/10691
// assertEquals("fs", repositoriesMetadata.repository("custom-repo").type());
// assertEquals(Settings.builder().put("location", randomRepoPath()).put("compress", false).build(),
// repositoriesMetadata.repository("custom-repo").settings());
assertEquals("fs", repositoriesMetadata.repository("custom-repo").type());
assertEquals(
Settings.builder().put("location", repoPath).put("compress", false).build(),
repositoriesMetadata.repository("custom-repo").settings()
);

// repo cleanup post verification
clusterAdmin().prepareDeleteRepository("custom-repo").get();
}

private void addClusterLevelReadOnlyBlock() throws InterruptedException, ExecutionException {
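For reference, the new testDataStreamPostRemoteStateRestore above reuses DataStreamRolloverIT through an anonymous subclass that flips a protected switch. Only the override is visible in this diff, so this Java sketch of the base-class side is an assumption:

import org.opensearch.test.OpenSearchIntegTestCase;

// Hypothetical shape of the base test; only triggerRemoteStateRestore() is
// confirmed by the diff above.
public class DataStreamRolloverSketchIT extends OpenSearchIntegTestCase {
    // Subclasses return true to run the same rollover scenario against a
    // cluster whose state was restored from the remote store.
    protected boolean triggerRemoteStateRestore() {
        return false;
    }

    public void testDataStreamRollover() throws Exception {
        // create a data stream, index documents, and roll it over (not shown)
        if (triggerRemoteStateRestore()) {
            // restart nodes and restore cluster state from the remote store (not shown)
        }
        // verify backing indices and rollover generation (not shown)
    }
}

Overriding a single protected method lets the remote-store suite inherit the whole rollover scenario instead of duplicating it.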
(Additional changed files not shown.)
