From 36f8531bf463788547545b52feca588b76ad49b5 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 28 Mar 2018 09:35:05 +0200 Subject: [PATCH 01/68] Don't load global state when only restoring indices (#29239) Restoring a snapshot, or getting the status of finished snapshots, currently always load the global state metadata file from the repository even if it not required. This slows down the restore process (or listing statuses process) and can also be an issue if the global state cannot be deserialized (because it has unknown customs for example). This commit splits the Repository.getSnapshotMetadata() method into two distincts methods: getGlobalMetadata() and getIndexMetadata() that are now called only when needed. --- .../repositories/Repository.java | 21 +- .../blobstore/BlobStoreRepository.java | 70 +++--- .../snapshots/RestoreService.java | 89 ++++---- .../snapshots/SnapshotsService.java | 13 +- .../index/shard/IndexShardTests.java | 7 +- ...etadataLoadingDuringSnapshotRestoreIT.java | 208 ++++++++++++++++++ .../SharedClusterSnapshotRestoreIT.java | 157 ++++++++++++- 7 files changed, 480 insertions(+), 85 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java diff --git a/server/src/main/java/org/elasticsearch/repositories/Repository.java b/server/src/main/java/org/elasticsearch/repositories/Repository.java index c8f830c461129..c0b45259f9911 100644 --- a/server/src/main/java/org/elasticsearch/repositories/Repository.java +++ b/server/src/main/java/org/elasticsearch/repositories/Repository.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.IndexCommit; import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -78,15 +79,21 @@ interface Factory { SnapshotInfo getSnapshotInfo(SnapshotId snapshotId); /** - * Returns global metadata associate with the snapshot. - *
<p>
- * The returned meta data contains global metadata as well as metadata for all indices listed in the indices parameter. + * Returns global metadata associated with the snapshot. * - * @param snapshot snapshot - * @param indices list of indices - * @return information about snapshot + * @param snapshotId the snapshot id to load the global metadata from + * @return the global metadata about the snapshot + */ + MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId); + + /** + * Returns the index metadata associated with the snapshot. + * + * @param snapshotId the snapshot id to load the index metadata from + * @param index the {@link IndexId} to load the metadata from + * @return the index metadata about the given index for the given snapshot */ - MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException; + IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException; /** * Returns a {@link RepositoryData} to describe the data in the repository, including the snapshots diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 020ea6a0f0887..e4101bb9289b1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -480,11 +480,6 @@ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId, return blobStoreSnapshot; } - @Override - public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException { - return readSnapshotMetaData(snapshot.snapshotId(), snapshot.version(), indices, false); - } - @Override public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { try { @@ -496,38 +491,59 @@ public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) { } } - private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List indices, boolean ignoreIndexErrors) throws IOException { - MetaData metaData; + @Override + public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) { + try { + return globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); + } catch (NoSuchFileException ex) { + throw new SnapshotMissingException(metadata.name(), snapshotId, ex); + } catch (IOException ex) { + throw new SnapshotException(metadata.name(), snapshotId, "failed to read global metadata", ex); + } + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(final SnapshotId snapshotId, final IndexId index) throws IOException { + final BlobPath indexPath = basePath().add("indices").add(index.getId()); + return indexMetaDataFormat.read(blobStore().blobContainer(indexPath), snapshotId.getUUID()); + } + + /** + * Returns the global metadata associated with the snapshot. + *
<p>
+ * The returned meta data contains global metadata as well as metadata + * for all indices listed in the indices parameter. + */ + private MetaData readSnapshotMetaData(final SnapshotId snapshotId, + final Version snapshotVersion, + final List indices, + final boolean ignoreErrors) throws IOException { if (snapshotVersion == null) { // When we delete corrupted snapshots we might not know which version we are dealing with // We can try detecting the version based on the metadata file format - assert ignoreIndexErrors; + assert ignoreErrors; if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) == false) { throw new SnapshotMissingException(metadata.name(), snapshotId); } } - try { - metaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID()); - } catch (NoSuchFileException ex) { - throw new SnapshotMissingException(metadata.name(), snapshotId, ex); - } catch (IOException ex) { - throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex); - } - MetaData.Builder metaDataBuilder = MetaData.builder(metaData); - for (IndexId index : indices) { - BlobPath indexPath = basePath().add("indices").add(index.getId()); - BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); - try { - metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false); - } catch (ElasticsearchParseException | IOException ex) { - if (ignoreIndexErrors) { - logger.warn(() -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); - } else { - throw ex; + + final MetaData.Builder metaData = MetaData.builder(getSnapshotGlobalMetaData(snapshotId)); + if (indices != null) { + for (IndexId index : indices) { + try { + metaData.put(getSnapshotIndexMetaData(snapshotId, index), false); + } catch (ElasticsearchParseException | IOException ex) { + if (ignoreErrors == false) { + throw new SnapshotException(metadata.name(), snapshotId, + "[" + index.getName() + "] failed to read metadata for index", ex); + } else { + logger.warn(() -> + new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); + } } } } - return metaDataBuilder.build(); + return metaData.build(); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index e6b54a20a1e07..63079fd63ce24 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -66,6 +66,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -91,6 +92,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_UPGRADED; import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.elasticsearch.snapshots.SnapshotUtils.filterIndices; /** * Service responsible for restoring snapshots @@ -182,17 +184,34 @@ public void restoreSnapshot(final RestoreRequest request, final ActionListener filteredIndices = 
SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions()); - final MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices)); // Make sure that we can restore from this snapshot validateSnapshotRestorable(request.repositoryName, snapshotInfo); - // Find list of indices that we need to restore - final Map renamedIndices = renamedIndices(request, filteredIndices); + // Resolve the indices from the snapshot that need to be restored + final List indicesInSnapshot = filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions()); + + final MetaData.Builder metaDataBuilder; + if (request.includeGlobalState()) { + metaDataBuilder = MetaData.builder(repository.getSnapshotGlobalMetaData(snapshotId)); + } else { + metaDataBuilder = MetaData.builder(); + } + + final List indexIdsInSnapshot = repositoryData.resolveIndices(indicesInSnapshot); + for (IndexId indexId : indexIdsInSnapshot) { + metaDataBuilder.put(repository.getSnapshotIndexMetaData(snapshotId, indexId), false); + } + + final MetaData metaData = metaDataBuilder.build(); + + // Apply renaming on index names, returning a map of names where + // the key is the renamed index and the value is the original name + final Map indices = renamedIndices(request, indicesInSnapshot); // Now we can start the actual restore process by adding shards to be recovered in the cluster state // and updating cluster metadata (global and index) as needed @@ -222,12 +241,13 @@ public ClusterState execute(ClusterState currentState) { RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); ImmutableOpenMap shards; Set aliases = new HashSet<>(); - if (!renamedIndices.isEmpty()) { + + if (indices.isEmpty() == false) { // We have some indices to restore ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder(); final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion() .minimumIndexCompatibilityVersion(); - for (Map.Entry indexEntry : renamedIndices.entrySet()) { + for (Map.Entry indexEntry : indices.entrySet()) { String index = indexEntry.getValue(); boolean partial = checkPartial(index); SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index); @@ -304,21 +324,42 @@ public ClusterState execute(ClusterState currentState) { } shards = shardsBuilder.build(); - RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards); + RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(indices.keySet())), shards); builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry)); } else { shards = ImmutableOpenMap.of(); } - checkAliasNameConflicts(renamedIndices, aliases); + checkAliasNameConflicts(indices, aliases); // Restore global state if needed - restoreGlobalStateIfRequested(mdBuilder); + if (request.includeGlobalState()) { + if (metaData.persistentSettings() != null) { + Settings settings = metaData.persistentSettings(); + clusterSettings.validateUpdate(settings); + mdBuilder.persistentSettings(settings); + } + if (metaData.templates() != null) { + // TODO: Should all existing templates be deleted first? 
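+                        // Restored templates are currently layered on top of the existing ones: a template
+                        // from the snapshot replaces an existing template with the same name, all others are kept.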
+ for (ObjectCursor cursor : metaData.templates().values()) { + mdBuilder.put(cursor.value); + } + } + if (metaData.customs() != null) { + for (ObjectObjectCursor cursor : metaData.customs()) { + if (!RepositoriesMetaData.TYPE.equals(cursor.key)) { + // Don't restore repositories while we are working with them + // TODO: Should we restore them at the end? + mdBuilder.putCustom(cursor.key, cursor.value); + } + } + } + } if (completed(shards)) { // We don't have any indices to restore - we are done restoreInfo = new RestoreInfo(snapshotId.getName(), - Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), + Collections.unmodifiableList(new ArrayList<>(indices.keySet())), shards.size(), shards.size() - failedShards(shards)); } @@ -426,32 +467,6 @@ private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings return builder.settings(settingsBuilder).build(); } - private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { - if (request.includeGlobalState()) { - if (metaData.persistentSettings() != null) { - Settings settings = metaData.persistentSettings(); - clusterSettings.validateUpdate(settings); - mdBuilder.persistentSettings(settings); - } - if (metaData.templates() != null) { - // TODO: Should all existing templates be deleted first? - for (ObjectCursor cursor : metaData.templates().values()) { - mdBuilder.put(cursor.value); - } - } - if (metaData.customs() != null) { - for (ObjectObjectCursor cursor : metaData.customs()) { - if (!RepositoriesMetaData.TYPE.equals(cursor.key)) { - // Don't restore repositories while we are working with them - // TODO: Should we restore them at the end? - mdBuilder.putCustom(cursor.key, cursor.value); - } - } - } - } - } - - @Override public void onFailure(String source, Exception e) { logger.warn(() -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e); @@ -757,7 +772,7 @@ private Map renamedIndices(RestoreRequest request, List "indices [" + index + "] and [" + previousIndex + "] are renamed into the same index [" + renamedIndex + "]"); } } - return renamedIndices; + return Collections.unmodifiableMap(renamedIndices); } /** diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 287bb2fed22a7..daf5c78b78cee 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -148,7 +148,7 @@ public RepositoryData getRepositoryData(final String repositoryName) { * @throws SnapshotMissingException if snapshot is not found */ public SnapshotInfo snapshot(final String repositoryName, final SnapshotId snapshotId) { - List entries = currentSnapshots(repositoryName, Arrays.asList(snapshotId.getName())); + List entries = currentSnapshots(repositoryName, Collections.singletonList(snapshotId.getName())); if (!entries.isEmpty()) { return inProgressSnapshot(entries.iterator().next()); } @@ -593,13 +593,13 @@ public List currentSnapshots(final String repository, */ public Map snapshotShards(final String repositoryName, final SnapshotInfo snapshotInfo) throws IOException { - Map shardStatus = new HashMap<>(); - Repository repository = repositoriesService.repository(repositoryName); - RepositoryData repositoryData = repository.getRepositoryData(); - MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(snapshotInfo.indices())); + final Repository repository = 
repositoriesService.repository(repositoryName); + final RepositoryData repositoryData = repository.getRepositoryData(); + + final Map shardStatus = new HashMap<>(); for (String index : snapshotInfo.indices()) { IndexId indexId = repositoryData.resolveIndexId(index); - IndexMetaData indexMetaData = metaData.indices().get(index); + IndexMetaData indexMetaData = repository.getSnapshotIndexMetaData(snapshotInfo.snapshotId(), indexId); if (indexMetaData != null) { int numberOfShards = indexMetaData.getNumberOfShards(); for (int i = 0; i < numberOfShards; i++) { @@ -633,7 +633,6 @@ public Map snapshotShards(final String reposi return unmodifiableMap(shardStatus); } - private SnapshotShardFailure findShardFailure(List shardFailures, ShardId shardId) { for (SnapshotShardFailure shardFailure : shardFailures) { if (shardId.getIndexName().equals(shardFailure.index()) && shardId.getId() == shardFailure.shardId()) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 822294a9c19f7..ed98406f343a0 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2605,7 +2605,12 @@ public SnapshotInfo getSnapshotInfo(SnapshotId snapshotId) { } @Override - public MetaData getSnapshotMetaData(SnapshotInfo snapshot, List indices) throws IOException { + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + return null; + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId index) throws IOException { return null; } diff --git a/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java new file mode 100644 index 0000000000000..bbc2a54b41baf --- /dev/null +++ b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -0,0 +1,208 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.snapshots.mockstore.MockRepository; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.nullValue; + +/** + * This class tests whether global and index metadata are only loaded from the repository when needed. +*/ +public class MetadataLoadingDuringSnapshotRestoreIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Collection> nodePlugins() { + /// This test uses a snapshot/restore plugin implementation that + // counts the number of times metadata are loaded + return Collections.singletonList(CountingMockRepositoryPlugin.class); + } + + public void testWhenMetadataAreLoaded() throws Exception { + createIndex("docs"); + indexRandom(true, + client().prepareIndex("docs", "doc", "1").setSource("rank", 1), + client().prepareIndex("docs", "doc", "2").setSource("rank", 2), + client().prepareIndex("docs", "doc", "3").setSource("rank", 3), + client().prepareIndex("others", "other").setSource("rank", 4), + client().prepareIndex("others", "other").setSource("rank", 5)); + + assertAcked(client().admin().cluster().preparePutRepository("repository") + .setType("coutingmock") + .setSettings(Settings.builder().put("location", randomRepoPath()))); + + // Creating a snapshot does not load any metadata + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("repository", "snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().failedShards(), equalTo(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 0); + assertIndexMetadataLoads("snap", "others", 0); + + // Getting a snapshot does not load any metadata + GetSnapshotsResponse getSnapshotsResponse = + client().admin().cluster().prepareGetSnapshots("repository").addSnapshots("snap").setVerbose(randomBoolean()).get(); + assertThat(getSnapshotsResponse.getSnapshots(), hasSize(1)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 0); + assertIndexMetadataLoads("snap", 
"others", 0); + + // Getting the status of a snapshot loads indices metadata but not global metadata + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("repository").setSnapshots("snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 1); + assertIndexMetadataLoads("snap", "others", 1); + + assertAcked(client().admin().indices().prepareDelete("docs", "others")); + + // Restoring a snapshot loads indices metadata but not the global state + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 2); + assertIndexMetadataLoads("snap", "others", 2); + + assertAcked(client().admin().indices().prepareDelete("docs")); + + // Restoring a snapshot with selective indices loads only required index metadata + restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setIndices("docs") + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 0); + assertIndexMetadataLoads("snap", "docs", 3); + assertIndexMetadataLoads("snap", "others", 2); + + assertAcked(client().admin().indices().prepareDelete("docs", "others")); + + // Restoring a snapshot including the global state loads it with the index metadata + restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("repository", "snap") + .setIndices("docs", "oth*") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().status(), equalTo(RestStatus.OK)); + assertGlobalMetadataLoads("snap", 1); + assertIndexMetadataLoads("snap", "docs", 4); + assertIndexMetadataLoads("snap", "others", 3); + } + + private void assertGlobalMetadataLoads(final String snapshot, final int times) { + AtomicInteger count = getCountingMockRepository().globalMetadata.get(snapshot); + if (times == 0) { + assertThat("Global metadata for " + snapshot + " must not have been loaded", count, nullValue()); + } else { + assertThat("Global metadata for " + snapshot + " must have been loaded " + times + " times", count.get(), equalTo(times)); + } + } + + private void assertIndexMetadataLoads(final String snapshot, final String index, final int times) { + final String key = key(snapshot, index); + AtomicInteger count = getCountingMockRepository().indicesMetadata.get(key); + if (times == 0) { + assertThat("Index metadata for " + key + " must not have been loaded", count, nullValue()); + } else { + assertThat("Index metadata for " + key + " must have been loaded " + times + " times", count.get(), equalTo(times)); + } + } + + private CountingMockRepository getCountingMockRepository() { + String master = internalCluster().getMasterName(); + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, master); + Repository repository = 
repositoriesService.repository("repository"); + assertThat(repository, instanceOf(CountingMockRepository.class)); + return (CountingMockRepository) repository; + } + + /** Compute a map key for the given snapshot and index names **/ + private static String key(final String snapshot, final String index) { + return snapshot + ":" + index; + } + + /** A mocked repository that counts the number of times global/index metadata are accessed **/ + public static class CountingMockRepository extends MockRepository { + + final Map globalMetadata = new ConcurrentHashMap<>(); + final Map indicesMetadata = new ConcurrentHashMap<>(); + + public CountingMockRepository(final RepositoryMetaData metadata, + final Environment environment, + final NamedXContentRegistry namedXContentRegistry) throws IOException { + super(metadata, environment, namedXContentRegistry); + } + + @Override + public MetaData getSnapshotGlobalMetaData(SnapshotId snapshotId) { + globalMetadata.computeIfAbsent(snapshotId.getName(), (s) -> new AtomicInteger(0)).incrementAndGet(); + return super.getSnapshotGlobalMetaData(snapshotId); + } + + @Override + public IndexMetaData getSnapshotIndexMetaData(SnapshotId snapshotId, IndexId indexId) throws IOException { + indicesMetadata.computeIfAbsent(key(snapshotId.getName(), indexId.getName()), (s) -> new AtomicInteger(0)).incrementAndGet(); + return super.getSnapshotIndexMetaData(snapshotId, indexId); + } + } + + /** A plugin that uses CountingMockRepository as implementation of the Repository **/ + public static class CountingMockRepositoryPlugin extends MockRepository.Plugin { + @Override + public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry) { + return Collections.singletonMap("coutingmock", (metadata) -> new CountingMockRepository(metadata, env, namedXContentRegistry)); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 3d4b6d3128a75..d2656619bd58d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -74,6 +73,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; @@ -85,6 +85,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.script.MockScriptEngine; @@ -109,6 +110,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.function.Predicate; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -2590,12 +2592,155 @@ public void testListCorruptedSnapshot() throws Exception 
{ assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap-1")); - try { - client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get().getSnapshots(); - } catch (SnapshotException ex) { - assertThat(ex.getRepositoryName(), equalTo("test-repo")); - assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); + final SnapshotException ex = expectThrows(SnapshotException.class, () -> + client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get()); + assertThat(ex.getRepositoryName(), equalTo("test-repo")); + assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); + } + + /** Tests that a snapshot with a corrupted global state file can still be restored */ + public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { + final Path repo = randomRepoPath(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + createIndex("test-idx-1", "test-idx-2"); + indexRandom(true, + client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); + flushAndRefresh("test-idx-1", "test-idx-2"); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + // Truncate the global state metadata file + final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + try(SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().indices().prepareDelete("test-idx-1", "test-idx-2")); + + SnapshotException ex = expectThrows(SnapshotException.class, () -> client().admin().cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRestoreGlobalState(true) + .setWaitForCompletion(true) + .get()); + assertThat(ex.getRepositoryName(), equalTo("test-repo")); + assertThat(ex.getSnapshotName(), equalTo("test-snap")); + assertThat(ex.getMessage(), containsString("failed to read global metadata")); + + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); + 
assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(snapshotInfo.successfulShards())); + + ensureGreen("test-idx-1", "test-idx-2"); + assertHitCount(client().prepareSearch("test-idx-*").setSize(0).get(), 3); + } + + /** + * Tests that a snapshot of multiple indices including one with a corrupted index metadata + * file can still be used to restore the non corrupted indices + * */ + public void testRestoreSnapshotWithCorruptedIndexMetadata() throws Exception { + final Client client = client(); + final Path repo = randomRepoPath(); + final int nbIndices = randomIntBetween(2, 3); + + final Map nbDocsPerIndex = new HashMap<>(); + for (int i = 0; i < nbIndices; i++) { + String indexName = "test-idx-" + i; + + assertAcked(prepareCreate(indexName).setSettings(Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, Math.min(2, numberOfShards())).put(SETTING_NUMBER_OF_REPLICAS, 0))); + + int nbDocs = randomIntBetween(1, 10); + nbDocsPerIndex.put(indexName, nbDocs); + + IndexRequestBuilder[] documents = new IndexRequestBuilder[nbDocs]; + for (int j = 0; j < nbDocs; j++) { + documents[j] = client.prepareIndex(indexName, "_doc").setSource("foo", "bar"); + } + indexRandom(true, documents); + } + flushAndRefresh(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo))); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .get(); + + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.failedShards(), equalTo(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.indices(), hasSize(nbIndices)); + + RepositoriesService service = internalCluster().getInstance(RepositoriesService.class, internalCluster().getMasterName()); + Repository repository = service.repository("test-repo"); + + final Map indexIds = repository.getRepositoryData().getIndices(); + assertThat(indexIds.size(), equalTo(nbIndices)); + + // Choose a random index from the snapshot + final IndexId corruptedIndex = randomFrom(indexIds.values()); + final Path indexMetadataPath = repo.resolve("indices") + .resolve(corruptedIndex.getId()) + .resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + + // Truncate the index metadata file + try(SeekableByteChannel outChan = Files.newByteChannel(indexMetadataPath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); + } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().indices().prepareDelete(nbDocsPerIndex.keySet().toArray(new String[nbDocsPerIndex.size()]))); + + Predicate isRestorableIndex = index -> corruptedIndex.getName().equals(index) == false; + + RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") + .setIndices(nbDocsPerIndex.keySet().stream().filter(isRestorableIndex).toArray(String[]::new)) + .setRestoreGlobalState(randomBoolean()) + .setWaitForCompletion(true) + .get(); + + ensureGreen(); + for 
(Map.Entry entry : nbDocsPerIndex.entrySet()) { + if (isRestorableIndex.test(entry.getKey())) { + assertHitCount(client().prepareSearch(entry.getKey()).setSize(0).get(), entry.getValue().longValue()); + } + } + + assertAcked(client().admin().cluster().prepareDeleteSnapshot("test-repo", snapshotInfo.snapshotId().getName()).get()); } public void testCannotCreateSnapshotsWithSameName() throws Exception { From ea8e3661d0050cc950781520a8b4e2d9eab24b8f Mon Sep 17 00:00:00 2001 From: Robin Neatherway Date: Wed, 28 Mar 2018 09:20:20 +0100 Subject: [PATCH 02/68] Fix a type check that is always false (#27726) DocumentParser: The checks for Text and Keyword were masked by the earlier check for String, which they are child classes of. As String field types are no longer supported, this check can be removed. --- .../java/org/elasticsearch/index/mapper/DocumentParser.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index ae80052994835..c2e0028544f88 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -625,9 +625,7 @@ private static void parseNullValue(ParseContext context, ObjectMapper parentMapp private static Mapper.Builder createBuilderFromFieldType(final ParseContext context, MappedFieldType fieldType, String currentFieldName) { Mapper.Builder builder = null; - if (fieldType instanceof StringFieldType) { - builder = context.root().findTemplateBuilder(context, currentFieldName, "string", XContentFieldType.STRING); - } else if (fieldType instanceof TextFieldType) { + if (fieldType instanceof TextFieldType) { builder = context.root().findTemplateBuilder(context, currentFieldName, "text", XContentFieldType.STRING); if (builder == null) { builder = new TextFieldMapper.Builder(currentFieldName) From cacf759213160ff011ef91fb441d2b2cd11fcc01 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 28 Mar 2018 12:25:46 +0200 Subject: [PATCH 03/68] Remove RELOCATED index shard state (#29246) as this information is already covered by ReplicationTracker.primaryMode. 
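Call sites that previously compared the shard state against RELOCATED now
ask the shard whether it is still operating in primary mode. For example,
in TransportReplicationAction:

    // before
    public boolean isRelocated() {
        return indexShard.state() == IndexShardState.RELOCATED;
    }

    // after
    public boolean isRelocated() {
        return indexShard.isPrimaryMode() == false;
    }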
--- .../TransportReplicationAction.java | 2 +- .../org/elasticsearch/index/IndexService.java | 2 - .../index/seqno/ReplicationTracker.java | 10 ++- .../elasticsearch/index/shard/IndexShard.java | 79 +++++++++---------- .../shard/IndexShardRelocatedException.java | 2 +- .../index/shard/IndexShardState.java | 6 +- .../indices/IndexingMemoryController.java | 2 +- .../recovery/RecoverySourceHandler.java | 8 +- .../indices/recovery/RecoveryTarget.java | 2 +- .../indices/store/IndicesStore.java | 2 +- .../TransportReplicationActionTests.java | 3 +- .../TransportWriteActionTests.java | 2 +- .../index/shard/IndexShardTests.java | 33 ++++---- .../indices/IndicesLifecycleListenerIT.java | 3 +- .../recovery/RecoverySourceHandlerTests.java | 2 +- .../test/store/MockFSIndexStore.java | 2 +- 16 files changed, 82 insertions(+), 78 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index aca8ed4973263..8d6bf9780f7a2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -1001,7 +1001,7 @@ class PrimaryShardReference extends ShardReference } public boolean isRelocated() { - return indexShard.state() == IndexShardState.RELOCATED; + return indexShard.isPrimaryMode() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index 64760629bfd24..db724112574a2 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -731,7 +731,6 @@ private void maybeTrimTranslog() { continue; case POST_RECOVERY: case STARTED: - case RELOCATED: try { shard.trimTranslog(); } catch (IndexShardClosedException | AlreadyClosedException ex) { @@ -751,7 +750,6 @@ private void maybeSyncGlobalCheckpoints() { case CLOSED: case CREATED: case RECOVERING: - case RELOCATED: continue; case POST_RECOVERY: assert false : "shard " + shard.shardId() + " is in post-recovery but marked as active"; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 43d4c48914900..dcca3d48254e5 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -84,7 +84,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * to replica mode (using {@link #completeRelocationHandoff}), as the relocation target will be in charge of the global checkpoint * computation from that point on. */ - boolean primaryMode; + volatile boolean primaryMode; /** * Boolean flag that indicates if a relocation handoff is in progress. A handoff is started by calling {@link #startRelocationHandoff} * and is finished by either calling {@link #completeRelocationHandoff} or {@link #abortRelocationHandoff}, depending on whether the @@ -252,6 +252,14 @@ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { return globalCheckpoints; } + /** + * Returns whether the replication tracker is in primary mode, i.e., whether the current shard is acting as primary from the point of + * view of replication. 
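+     * The backing flag is volatile, so this method may be called without holding the tracker's mutex.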
+ */ + public boolean isPrimaryMode() { + return primaryMode; + } + /** * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however. diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 30f813e86e234..0ab2cc699d355 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -217,15 +217,13 @@ Runnable getGlobalCheckpointSyncer() { private final IndexShardOperationPermits indexShardOperationPermits; - private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); + private static final EnumSet readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.POST_RECOVERY); // for primaries, we only allow to write when actually started (so the cluster has decided we started) // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be - // in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target - // which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries). - public static final EnumSet writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); - // replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent + // in state RECOVERING or POST_RECOVERY. + // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source - private static final EnumSet writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final IndexSearcherWrapper searcherWrapper; @@ -412,15 +410,14 @@ public void updateShardState(final ShardRouting newRouting, } changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); - } else if (state == IndexShardState.RELOCATED && + } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isPrimaryMode() == false && (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { - // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery - // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two + // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery + // failure / cancellation). 
The reason is that at the moment we cannot safely reactivate primary mode without risking two // active primaries. throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); } - assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || - state == IndexShardState.CLOSED : + assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state; persistMetadata(path, indexSettings, newRouting, currentRouting, logger); final CountDownLatch shardStateUpdated = new CountDownLatch(1); @@ -538,9 +535,6 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } if (state == IndexShardState.RECOVERING) { throw new IndexShardRecoveringException(shardId); } @@ -558,13 +552,11 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta * Completes the relocation. Operations are blocked and current operations are drained before changing state to relocated. The provided * {@link Runnable} is executed after all operations are successfully blocked. * - * @param reason the reason for the relocation * @param consumer a {@link Runnable} that is executed after operations are blocked * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws InterruptedException if blocking operations is interrupted */ - public void relocated( - final String reason, final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { + public void relocated(final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { @@ -581,9 +573,8 @@ public void relocated( consumer.accept(primaryContext); synchronized (mutex) { verifyRelocatingState(); - changeState(IndexShardState.RELOCATED, reason); + replicationTracker.completeRelocationHandoff(); // make changes to primaryMode flag only under mutex } - replicationTracker.completeRelocationHandoff(); } catch (final Exception e) { try { replicationTracker.abortRelocationHandoff(); @@ -1083,7 +1074,7 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws EngineException { final IndexShardState state = this.state; // one time volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireLastIndexCommit(flushFirst); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1097,7 +1088,7 @@ public Engine.IndexCommitRef acquireLastIndexCommit(boolean flushFirst) throws E public Engine.IndexCommitRef acquireSafeIndexCommit() throws EngineException { final IndexShardState state = this.state; // one time 
volatile read // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine - if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { + if (state == IndexShardState.STARTED || state == IndexShardState.CLOSED) { return getEngine().acquireSafeIndexCommit(); } else { throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); @@ -1202,9 +1193,6 @@ public IndexShard postRecovery(String reason) throws IndexShardStartedException, if (state == IndexShardState.STARTED) { throw new IndexShardStartedException(shardId); } - if (state == IndexShardState.RELOCATED) { - throw new IndexShardRelocatedException(shardId); - } // we need to refresh again to expose all operations that were index until now. Otherwise // we may not expose operations that were indexed with a refresh listener that was immediately // responded to in addRefreshListener. @@ -1408,7 +1396,7 @@ public void finalizeRecovery() { public boolean ignoreRecoveryAttempt() { IndexShardState state = state(); // one time volatile read return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED || - state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED; + state == IndexShardState.CLOSED; } public void readAllowed() throws IllegalIndexShardStateException { @@ -1426,20 +1414,19 @@ public boolean isReadAllowed() { private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (origin == Engine.Operation.Origin.PRIMARY) { - verifyPrimary(); - if (writeAllowedStatesForPrimary.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]"); - } - } else if (origin.isRecovery()) { + if (origin.isRecovery()) { if (state != IndexShardState.RECOVERING) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]"); } } else { - assert origin == Engine.Operation.Origin.REPLICA; - verifyReplicationTarget(); - if (writeAllowedStatesForReplica.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]"); + if (origin == Engine.Operation.Origin.PRIMARY) { + verifyPrimary(); + } else { + assert origin == Engine.Operation.Origin.REPLICA; + verifyReplicationTarget(); + } + if (writeAllowedStates.contains(state) == false) { + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]"); } } } @@ -1452,7 +1439,7 @@ private void verifyPrimary() { private void verifyReplicationTarget() { final IndexShardState state = state(); - if (shardRouting.primary() && shardRouting.active() && state != IndexShardState.RELOCATED) { + if (shardRouting.primary() && shardRouting.active() && replicationTracker.isPrimaryMode()) { // must use exception that is not ignored by replication logic. 
See TransportActions.isShardNotAvailableException throw new IllegalStateException("active primary shard " + shardRouting + " cannot be a replication target before " + "relocation hand off, state is [" + state + "]"); @@ -1476,7 +1463,7 @@ private void verifyNotClosed(Exception suppressed) throws IllegalIndexShardState protected final void verifyActive() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read - if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) { + if (state != IndexShardState.STARTED) { throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard is active"); } } @@ -1778,7 +1765,7 @@ public ObjectLongMap getInSyncGlobalCheckpoints() { public void maybeSyncGlobalCheckpoint(final String reason) { verifyPrimary(); verifyNotClosed(); - if (state == IndexShardState.RELOCATED) { + if (replicationTracker.isPrimaryMode() == false) { return; } // only sync if there are not operations in flight @@ -1831,7 +1818,7 @@ public void updateGlobalCheckpointOnReplica(final long globalCheckpoint, final S * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move * to recovery finalization, or even finished recovery before the update arrives here. */ - assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED && state() != IndexShardState.RELOCATED : + assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received a global checkpoint [" + globalCheckpoint + "] " + "that is higher than its local checkpoint [" + localCheckpoint + "]"; return; @@ -1850,7 +1837,9 @@ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext p assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && getEngine().getLocalCheckpointTracker().getCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); - replicationTracker.activateWithPrimaryContext(primaryContext); + synchronized (mutex) { + replicationTracker.activateWithPrimaryContext(primaryContext); // make changes to primaryMode flag only under mutex + } } /** @@ -2067,6 +2056,13 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService } } + /** + * Returns whether the shard is in primary mode, i.e., in charge of replicating changes (see {@link ReplicationTracker}). 
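+     * A primary shard that has handed off its primary context during relocation returns {@code false} here,
+     * even though its routing entry may still list it as primary.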
+ */ + public boolean isPrimaryMode() { + return replicationTracker.isPrimaryMode(); + } + class ShardEventListener implements Engine.EventListener { private final CopyOnWriteArrayList> delegates = new CopyOnWriteArrayList<>(); @@ -2205,8 +2201,7 @@ public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final // means that the master will fail this shard as all initializing shards are failed when a primary is selected // We abort early here to prevent an ongoing recovery from the failed primary to mess with the global / local checkpoint if (shardState != IndexShardState.POST_RECOVERY && - shardState != IndexShardState.STARTED && - shardState != IndexShardState.RELOCATED) { + shardState != IndexShardState.STARTED) { throw new IndexShardNotStartedException(shardId, shardState); } try { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java index bafa14f2e581f..4ea5c0e74eff2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardRelocatedException.java @@ -30,7 +30,7 @@ public IndexShardRelocatedException(ShardId shardId) { } public IndexShardRelocatedException(ShardId shardId, String reason) { - super(shardId, IndexShardState.RELOCATED, reason); + super(shardId, IndexShardState.STARTED, reason); } public IndexShardRelocatedException(StreamInput in) throws IOException{ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index d3c6de7136c11..c3711f1baabc3 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -25,16 +25,18 @@ public enum IndexShardState { RECOVERING((byte) 1), POST_RECOVERY((byte) 2), STARTED((byte) 3), - RELOCATED((byte) 4), + // previously, 4 was the RELOCATED state CLOSED((byte) 5); - private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length]; + private static final IndexShardState[] IDS = new IndexShardState[IndexShardState.values().length + 1]; // +1 for RELOCATED state static { for (IndexShardState state : IndexShardState.values()) { assert state.id() < IDS.length && state.id() >= 0; IDS[state.id()] = state; } + assert IDS[4] == null; + IDS[4] = STARTED; // for backward compatibility reasons (this was the RELOCATED state) } private final byte id; diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index e4eeee27e9ecc..d8e2ec5354764 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -89,7 +89,7 @@ public class IndexingMemoryController extends AbstractComponent implements Index private final Cancellable scheduler; private static final EnumSet CAN_WRITE_INDEX_BUFFER_STATES = EnumSet.of( - IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED); + IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final ShardsIndicesStatusChecker statusChecker; diff --git 
a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java index d6a802c30660c..2189e6b2fb2a8 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java @@ -236,8 +236,8 @@ private void runUnderPrimaryPermit(CancellableThreads.Interruptable runnable, St shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason); try (Releasable ignored = onAcquired.actionGet()) { // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent - // races, as IndexShard will change to RELOCATED only when it holds all operation permits, see IndexShard.relocated() - if (shard.state() == IndexShardState.RELOCATED) { + // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated() + if (shard.isPrimaryMode() == false) { throw new IndexShardRelocatedException(shard.shardId()); } runnable.run(); @@ -501,9 +501,9 @@ public void finalizeRecovery(final long targetLocalCheckpoint) throws IOExceptio if (request.isPrimaryRelocation()) { logger.trace("performing relocation hand-off"); // this acquires all IndexShard operation permits and will thus delay new recoveries until it is done - cancellableThreads.execute(() -> shard.relocated("to " + request.targetNode(), recoveryTarget::handoffPrimaryContext)); + cancellableThreads.execute(() -> shard.relocated(recoveryTarget::handoffPrimaryContext)); /* - * if the recovery process fails after setting the shard state to RELOCATED, both relocation source and + * if the recovery process fails after disabling primary mode on the source shard, both relocation source and * target are failed (see {@link IndexShard#updateRoutingEntry}). */ } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index 91d3332f8e646..eb0db395a155f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -209,7 +209,7 @@ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOE } RecoveryState.Stage stage = indexShard.recoveryState().getStage(); if (indexShard.recoveryState().getPrimary() && (stage == RecoveryState.Stage.FINALIZE || stage == RecoveryState.Stage.DONE)) { - // once primary relocation has moved past the finalization step, the relocation source can be moved to RELOCATED state + // once primary relocation has moved past the finalization step, the relocation source can put the target into primary mode // and start indexing as primary into the target shard (see TransportReplicationAction). Resetting the target shard in this // state could mean that indexing is halted until the recovery retry attempt is completed and could also destroy existing // documents indexed and acknowledged before the reset. 
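The hunks above replace checks of the retired RELOCATED state with the new
IndexShard#isPrimaryMode method. As a condensed sketch (not part of the patch;
names are taken from the RecoverySourceHandler hunk above, with the listener
and CancellableThreads plumbing elided), the permit-guarded authority check
now looks like this:

    // Sketch only: primary mode can only flip while all operation permits
    // are held (see IndexShard.relocated), so checking it under a permit
    // cannot race with the relocation hand-off.
    shard.acquirePrimaryOperationPermit(onAcquired, ThreadPool.Names.SAME, reason);
    try (Releasable ignored = onAcquired.actionGet()) {
        if (shard.isPrimaryMode() == false) { // was: shard.state() == IndexShardState.RELOCATED
            throw new IndexShardRelocatedException(shard.shardId());
        }
        runnable.run();
    }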
diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 37f67ddf102ac..29f6e7aeeecc1 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -76,7 +76,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe Setting.positiveTimeSetting("indices.store.delete.shard.timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope); public static final String ACTION_SHARD_EXISTS = "internal:index/shard/exists"; - private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED); + private static final EnumSet ACTIVE_STATES = EnumSet.of(IndexShardState.STARTED); private final IndicesService indicesService; private final ClusterService clusterService; private final TransportService transportService; diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index b9688053fba2d..4e7844950d6b2 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -685,6 +685,7 @@ public void testSeqNoIsSetOnPrimary() throws Exception { final IndexShard shard = mock(IndexShard.class); when(shard.getPrimaryTerm()).thenReturn(primaryTerm); when(shard.routingEntry()).thenReturn(routingEntry); + when(shard.isPrimaryMode()).thenReturn(true); IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId); Set inSyncIds = randomBoolean() ? Collections.singleton(routingEntry.allocationId().getId()) : clusterService.state().metaData().index(index).inSyncAllocationIds(0); @@ -1217,7 +1218,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java index bed1b5de03750..d32fbf1578714 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java @@ -472,7 +472,7 @@ private IndexShard mockIndexShard(ShardId shardId, ClusterService clusterService } return routing; }); - when(indexShard.state()).thenAnswer(invocationOnMock -> isRelocated.get() ? 
IndexShardState.RELOCATED : IndexShardState.STARTED); + when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false); doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class)); when(indexShard.getPrimaryTerm()).thenAnswer(i -> clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id())); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index ed98406f343a0..941a967355345 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -641,7 +641,7 @@ public void testOperationPermitOnReplicaShards() throws Exception { routing = newShardRouting(routing.shardId(), routing.currentNodeId(), "otherNode", true, ShardRoutingState.RELOCATING, AllocationId.newRelocation(routing.allocationId())); IndexShardTestCase.updateRoutingEntry(indexShard, routing); - indexShard.relocated("test", primaryContext -> {}); + indexShard.relocated(primaryContext -> {}); engineClosed = false; break; } @@ -1325,7 +1325,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { Thread recoveryThread = new Thread(() -> { latch.countDown(); try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1336,14 +1336,14 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { recoveryThread.start(); latch.await(); // recovery can only be finalized after we release the current primaryOperationLock - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + assertTrue(shard.isPrimaryMode()); } // recovery can be now finalized recoveryThread.join(); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) { // lock can again be acquired - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); } closeShards(shard); @@ -1354,7 +1354,7 @@ public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1385,6 +1385,7 @@ public void onResponse(Releasable releasable) { public void testStressRelocated() throws Exception { final IndexShard shard = newStartedShard(true); + assertTrue(shard.isPrimaryMode()); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node")); final int numThreads = randomIntBetween(2, 4); Thread[] indexThreads = new Thread[numThreads]; @@ -1407,7 +1408,7 @@ public void run() { AtomicBoolean relocated = new AtomicBoolean(); final Thread recoveryThread = new Thread(() -> { try { - shard.relocated("simulated recovery", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } catch (InterruptedException e) { throw new RuntimeException(e); } @@ -1419,15 +1420,15 @@ public void run() { recoveryThread.start(); assertThat(relocated.get(), equalTo(false)); 
assertThat(shard.getActiveOperationsCount(), greaterThan(0)); - // ensure we only transition to RELOCATED state after pending operations completed - assertThat(shard.state(), equalTo(IndexShardState.STARTED)); + // ensure we only transition after pending operations completed + assertTrue(shard.isPrimaryMode()); // complete pending operations barrier.await(); // complete recovery/relocation recoveryThread.join(); // ensure relocated successfully once pending operations are done assertThat(relocated.get(), equalTo(true)); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + assertFalse(shard.isPrimaryMode()); assertThat(shard.getActiveOperationsCount(), equalTo(0)); for (Thread indexThread : indexThreads) { @@ -1441,7 +1442,7 @@ public void testRelocatedShardCanNotBeRevived() throws IOException, InterruptedE final IndexShard shard = newStartedShard(true); final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); expectThrows(IllegalIndexShardStateException.class, () -> IndexShardTestCase.updateRoutingEntry(shard, originalRouting)); closeShards(shard); } @@ -1451,7 +1452,7 @@ public void testShardCanNotBeMarkedAsRelocatedIfRelocationCancelled() throws IOE final ShardRouting originalRouting = shard.routingEntry(); IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(originalRouting, "other_node")); IndexShardTestCase.updateRoutingEntry(shard, originalRouting); - expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated("test", primaryContext -> {})); + expectThrows(IllegalIndexShardStateException.class, () -> shard.relocated(primaryContext -> {})); closeShards(shard); } @@ -1470,7 +1471,7 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { cyclicBarrier.await(); - shard.relocated("test", primaryContext -> {}); + shard.relocated(primaryContext -> {}); } }); relocationThread.start(); @@ -1491,7 +1492,7 @@ protected void doRun() throws Exception { cyclicBarrier.await(); relocationThread.join(); cancellingThread.join(); - if (shard.state() == IndexShardState.RELOCATED) { + if (shard.isPrimaryMode() == false) { logger.debug("shard was relocated successfully"); assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class)); assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true)); @@ -1763,8 +1764,8 @@ public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedExc assertThat(shard.state(), equalTo(IndexShardState.STARTED)); ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node"); IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting); - shard.relocated("simulate mark as relocated", primaryContext -> {}); - assertThat(shard.state(), equalTo(IndexShardState.RELOCATED)); + shard.relocated(primaryContext -> {}); + assertFalse(shard.isPrimaryMode()); try { IndexShardTestCase.updateRoutingEntry(shard, origRouting); fail("Expected IndexShardRelocatedException"); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java index f36dd9a78b89b..49bbacf46c9bc 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java +++ 
b/server/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java @@ -58,7 +58,6 @@ import static org.elasticsearch.index.shard.IndexShardState.CREATED; import static org.elasticsearch.index.shard.IndexShardState.POST_RECOVERY; import static org.elasticsearch.index.shard.IndexShardState.RECOVERING; -import static org.elasticsearch.index.shard.IndexShardState.RELOCATED; import static org.elasticsearch.index.shard.IndexShardState.STARTED; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.CoreMatchers.equalTo; @@ -186,7 +185,7 @@ public void testIndexStateShardChanged() throws Throwable { ensureGreen(); //the 3 relocated shards get closed on the first node - assertShardStatesMatch(stateChangeListenerNode1, 3, RELOCATED, CLOSED); + assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED); //the 3 relocated shards get created on the second node assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 661a1f0635430..4287b675f353c 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -394,7 +394,7 @@ public void testThrowExceptionOnPrimaryRelocatedBeforePhase1Started() throws IOE final IndexShard shard = mock(IndexShard.class); when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class)); when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class)); - when(shard.state()).thenReturn(IndexShardState.RELOCATED); + when(shard.isPrimaryMode()).thenReturn(false); when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class)); doAnswer(invocation -> { ((ActionListener)invocation.getArguments()[0]).onResponse(() -> {}); diff --git a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 1efd210b110c8..921b819b9b712 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -85,7 +85,7 @@ public DirectoryService newDirectoryService(ShardPath path) { } private static final EnumSet validCheckIndexStates = EnumSet.of( - IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY + IndexShardState.STARTED, IndexShardState.POST_RECOVERY ); private static final class Listener implements IndexEventListener { From 4417580d053f1b61966b0c1f01881ae710c6a331 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 28 Mar 2018 08:22:06 -0400 Subject: [PATCH 04/68] Remove leftover tests.rest.spec property from docs (#29279) We previously had a property to specify the location of the REST test spec files but this was removed in a previous refactoring yet left behind in the docs. This commit removes the last remaining vestige of this parameter. --- TESTING.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 9b719826de4ff..bfdca2926026f 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -296,7 +296,6 @@ e.g. 
 -Dtests.rest.suite=index,get,create/10_with_id
 * `tests.rest.blacklist`: comma separated globs that identify tests that are
 blacklisted and need to be skipped
 e.g. -Dtests.rest.blacklist=index/*/Index document,get/10_basic/*
-* `tests.rest.spec`: REST spec path (default /rest-api-spec/api)
 
 Note that the REST tests, like all the integration tests, can be run against an
 external cluster by specifying the `tests.cluster` property, which if present
 needs to contain a

From 63203b228bb7166e62d707b5d67d04ca95f4368e Mon Sep 17 00:00:00 2001
From: Diwas Joshi
Date: Wed, 28 Mar 2018 18:31:45 +0530
Subject: [PATCH 05/68] [Docs] Update aggregations.asciidoc (#29265)

Add note about accuracy of some aggregation results.
---
 docs/reference/aggregations.asciidoc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc
index f2fdd9a16de82..472b87b72fe67 100644
--- a/docs/reference/aggregations.asciidoc
+++ b/docs/reference/aggregations.asciidoc
@@ -40,6 +40,10 @@ NOTE: Bucketing aggregations can have sub-aggregations (bucketing or metric). Th
 aggregations (one can nest an aggregation under a "parent" aggregation, which is itself a sub-aggregation of
 another higher-level aggregation).
 
+NOTE: Aggregations operate on the `double` representation of
+ the data. As a consequence, the result may be approximate when running on longs
+ whose absolute value is greater than `2^53`.
+
 [float]
 == Structuring Aggregations
 
From 245dd7315685a1463e74793c2bd287d82efc72d2 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Wed, 28 Mar 2018 16:09:18 +0200
Subject: [PATCH 06/68] Bulk processor#awaitClose to close scheduler (#29263)

When the `BulkProcessor` is used with the high-level REST client, a scheduler
is created internally to schedule tasks. This scheduler is not exposed to
users and needs to be closed once the `BulkProcessor` is closed. There are
two ways to close the `BulkProcessor`: the ordinary `close` method and
`awaitClose`. The former closes the scheduler while the latter doesn't,
leaving threads lingering.
---
 .../elasticsearch/client/BulkProcessorIT.java | 350 ++++++++++++++++++
 .../action/bulk/BulkProcessor.java            |   7 +-
 .../action/bulk/BulkProcessorTests.java       |  27 ++
 3 files changed, 382 insertions(+), 2 deletions(-)
 create mode 100644 client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java

diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java
new file mode 100644
index 0000000000000..7f59fcc831213
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java
@@ -0,0 +1,350 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkProcessor; +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.get.MultiGetItemResponse; +import org.elasticsearch.action.get.MultiGetRequest; +import org.elasticsearch.action.get.MultiGetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.either; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class BulkProcessorIT extends ESRestHighLevelClientTestCase { + + private static BulkProcessor.Builder initBulkProcessorBuilder(BulkProcessor.Listener listener) { + return BulkProcessor.builder(highLevelClient()::bulkAsync, listener); + } + + public void testThatBulkProcessorCountIsCorrect() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = randomIntBetween(10, 100); + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .build()) { + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorFlush() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + + int numDocs = randomIntBetween(10, 100); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that this bulk won't be automatically flushed + .setConcurrentRequests(randomIntBetween(0, 10)).setBulkActions(numDocs + randomIntBetween(1, 100)) + 
.setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + + assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false)); + //we really need an explicit flush as none of the bulk thresholds was reached + processor.flush(); + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + } + + public void testBulkProcessorConcurrentRequests() throws Exception { + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 7); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + MultiGetRequest multiGetRequest; + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + multiGetRequest = indexDocs(processor, numDocs); + + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(expectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs - numDocs % bulkActions)); + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(numDocs)); + + Set ids = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false)); + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(numDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorWaitOnClose() throws Exception { + BulkProcessorTestListener listener = new BulkProcessorTestListener(); + + int numDocs = randomIntBetween(10, 100); + BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new 
ByteSizeValue(randomIntBetween(1, 10), + RandomPicks.randomFrom(random(), ByteSizeUnit.values()))) + .build(); + + MultiGetRequest multiGetRequest = indexDocs(processor, numDocs); + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + if (randomBoolean()) { // check if we can call it multiple times + if (randomBoolean()) { + assertThat(processor.awaitClose(1, TimeUnit.MINUTES), is(true)); + } else { + processor.close(); + } + } + + assertThat(listener.beforeCounts.get(), greaterThanOrEqualTo(1)); + assertThat(listener.afterCounts.get(), greaterThanOrEqualTo(1)); + for (Throwable bulkFailure : listener.bulkFailures) { + logger.error("bulk failure", bulkFailure); + } + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), numDocs); + } + + public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { + + String createIndexBody = "{\n" + + " \"settings\" : {\n" + + " \"index\" : {\n" + + " \"blocks.write\" : true\n" + + " }\n" + + " }\n" + + " \n" + + "}"; + + NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); + Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + + int bulkActions = randomIntBetween(10, 100); + int numDocs = randomIntBetween(bulkActions, bulkActions + 100); + int concurrentRequests = randomIntBetween(0, 10); + + int expectedBulkActions = numDocs / bulkActions; + + final CountDownLatch latch = new CountDownLatch(expectedBulkActions); + int totalExpectedBulkActions = numDocs % bulkActions == 0 ? expectedBulkActions : expectedBulkActions + 1; + final CountDownLatch closeLatch = new CountDownLatch(totalExpectedBulkActions); + + int testDocs = 0; + int testReadOnlyDocs = 0; + MultiGetRequest multiGetRequest = new MultiGetRequest(); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch, closeLatch); + + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setConcurrentRequests(concurrentRequests).setBulkActions(bulkActions) + //set interval and size to high values + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)).build()) { + + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + testDocs++; + processor.add(new IndexRequest("test", "test", Integer.toString(testDocs)) + .source(XContentType.JSON, "field", "value")); + multiGetRequest.add("test", "test", Integer.toString(testDocs)); + } else { + testReadOnlyDocs++; + processor.add(new IndexRequest("test-ro", "test", Integer.toString(testReadOnlyDocs)) + .source(XContentType.JSON, "field", "value")); + } + } + } + + closeLatch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.afterCounts.get(), equalTo(totalExpectedBulkActions)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertThat(listener.bulkItems.size(), equalTo(testDocs + testReadOnlyDocs)); + + Set ids = new HashSet<>(); + Set readOnlyIds = new HashSet<>(); + for (BulkItemResponse bulkItemResponse : listener.bulkItems) { + assertThat(bulkItemResponse.getIndex(), either(equalTo("test")).or(equalTo("test-ro"))); + assertThat(bulkItemResponse.getType(), equalTo("test")); + if (bulkItemResponse.getIndex().equals("test")) { + assertThat(bulkItemResponse.isFailed(), equalTo(false)); + //with concurrent 
requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(ids.add(bulkItemResponse.getId()), equalTo(true)); + } else { + assertThat(bulkItemResponse.isFailed(), equalTo(true)); + //with concurrent requests > 1 we can't rely on the order of the bulk requests + assertThat(Integer.valueOf(bulkItemResponse.getId()), both(greaterThan(0)).and(lessThanOrEqualTo(testReadOnlyDocs))); + //we do want to check that we don't get duplicate ids back + assertThat(readOnlyIds.add(bulkItemResponse.getId()), equalTo(true)); + } + } + + assertMultiGetResponse(highLevelClient().multiGet(multiGetRequest), testDocs); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + for (int i = 1; i <= numDocs; i++) { + if (randomBoolean()) { + processor.add(new IndexRequest("test", "test", Integer.toString(i)) + .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); + } else { + final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" + + Strings.toString(JsonXContent.contentBuilder() + .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; + processor.add(new BytesArray(source), null, null, XContentType.JSON); + } + multiGetRequest.add("test", "test", Integer.toString(i)); + } + return multiGetRequest; + } + + private static void assertResponseItems(List bulkItemResponses, int numDocs) { + assertThat(bulkItemResponses.size(), is(numDocs)); + int i = 1; + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + assertThat(bulkItemResponse.getIndex(), equalTo("test")); + assertThat(bulkItemResponse.getType(), equalTo("test")); + assertThat(bulkItemResponse.getId(), equalTo(Integer.toString(i++))); + assertThat("item " + i + " failed with cause: " + bulkItemResponse.getFailureMessage(), + bulkItemResponse.isFailed(), equalTo(false)); + } + } + + private static void assertMultiGetResponse(MultiGetResponse multiGetResponse, int numDocs) { + assertThat(multiGetResponse.getResponses().length, equalTo(numDocs)); + int i = 1; + for (MultiGetItemResponse multiGetItemResponse : multiGetResponse) { + assertThat(multiGetItemResponse.getIndex(), equalTo("test")); + assertThat(multiGetItemResponse.getType(), equalTo("test")); + assertThat(multiGetItemResponse.getId(), equalTo(Integer.toString(i++))); + } + } + + private static class BulkProcessorTestListener implements BulkProcessor.Listener { + + private final CountDownLatch[] latches; + private final AtomicInteger beforeCounts = new AtomicInteger(); + private final AtomicInteger afterCounts = new AtomicInteger(); + private final List bulkItems = new CopyOnWriteArrayList<>(); + private final List bulkFailures = new CopyOnWriteArrayList<>(); + + private BulkProcessorTestListener(CountDownLatch... 
latches) { + this.latches = latches; + } + + @Override + public void beforeBulk(long executionId, BulkRequest request) { + beforeCounts.incrementAndGet(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + bulkItems.addAll(Arrays.asList(response.getItems())); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + bulkFailures.add(failure); + afterCounts.incrementAndGet(); + for (CountDownLatch latch : latches) { + latch.countDown(); + } + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 668dd230f609b..39a185741db92 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -211,7 +211,6 @@ public void close() { } catch (InterruptedException exc) { Thread.currentThread().interrupt(); } - onClose.run(); } /** @@ -237,7 +236,11 @@ public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws Inter if (bulkRequest.numberOfActions() > 0) { execute(); } - return this.bulkRequestHandler.awaitClose(timeout, unit); + try { + return this.bulkRequestHandler.awaitClose(timeout, unit); + } finally { + onClose.run(); + } } /** diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index 4ff5b69ad378a..3fbfa381ad352 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -32,6 +32,8 @@ import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; public class BulkProcessorTests extends ESTestCase { @@ -97,4 +99,29 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) assertNull(threadPool.getThreadContext().getTransient(transientKey)); bulkProcessor.close(); } + + public void testAwaitOnCloseCallsOnClose() throws Exception { + final AtomicBoolean called = new AtomicBoolean(false); + BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> { + }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + + } + }, 0, 10, new ByteSizeValue(1000), null, (delay, executor, command) -> null, () -> called.set(true)); + + assertFalse(called.get()); + bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS); + assertTrue(called.get()); + } } From 27e45fc55205065f5ce87e0034b763fbb7ab7abc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 28 Mar 2018 16:19:45 +0200 Subject: [PATCH 07/68] Remove IndicesOptions bwc serialization layer (#29281) On master we don't need to talk to pre-6.0 nodes anymore. 
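For context, IndicesOptions packs its boolean flags into a single byte, and the
removed shim masked off the high bits when writing to old nodes. A sketch of the
removed branch (the bit position of the ignoreAliases flag is an assumption
inferred from the 0x3f mask in the diff below, not something the patch spells
out):

    // Sketch only: pre-6.0 nodes did not know the ignoreAliases flag
    // (assumed to live in bit 6), so the old write path cleared bits 6 and 7
    // before serializing:
    if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
        out.write(id);        // the peer understands the full options byte
    } else {
        out.write(id & 0x3f); // strip flags unknown to pre-6.0 peers
    }
    // On master every peer understands the full byte, so the branch collapses
    // to the unconditional out.write(id).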
--- .../action/support/IndicesOptions.java | 13 ++----------- .../action/support/IndicesOptionsTests.java | 6 +----- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java index b4db289148b1c..64c26d6b94aa5 100644 --- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java +++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.support; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.rest.RestRequest; @@ -119,20 +118,12 @@ public boolean allowAliasesToMultipleIndices() { public boolean ignoreAliases() { return (id & IGNORE_ALIASES) != 0; } - + public void writeIndicesOptions(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { - out.write(id); - } else { - // if we are talking to a node that doesn't support the newly added flag (ignoreAliases) - // flip to 0 all the bits starting from the 7th - out.write(id & 0x3f); - } + out.write(id); } public static IndicesOptions readIndicesOptions(StreamInput in) throws IOException { - //if we read from a node that doesn't support the newly added flag (ignoreAliases) - //we just receive the old corresponding value with the new flag set to false (default) byte id = in.readByte(); if (id >= VALUES.length) { throw new IllegalArgumentException("No valid missing index type id: " + id); diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 315af13133d30..7feec3153cd53 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -51,11 +51,7 @@ public void testSerialization() throws Exception { assertThat(indicesOptions2.forbidClosedIndices(), equalTo(indicesOptions.forbidClosedIndices())); assertThat(indicesOptions2.allowAliasesToMultipleIndices(), equalTo(indicesOptions.allowAliasesToMultipleIndices())); - if (output.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) { - assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); - } else { - assertFalse(indicesOptions2.ignoreAliases()); - } + assertEquals(indicesOptions2.ignoreAliases(), indicesOptions.ignoreAliases()); } } From c3fdf8fbfb80f72b2bc974c6f29b73c65b593646 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 28 Mar 2018 17:45:44 +0200 Subject: [PATCH 08/68] [Docs] Fix small typo in ranking evaluation docs --- docs/reference/search/rank-eval.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index eace381bfaa48..fa75374110ef6 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -19,7 +19,7 @@ Users have a specific _information need_, e.g. they are looking for gift in a we They usually enters some search terms into a search box or some other web form. All of this information, together with meta information about the user (e.g. the browser, location, earlier preferences etc...) 
then gets translated into a query to the underlying search system.
 
-The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information_need.
+The challenge for search engineers is to tweak this translation process from user entries to a concrete query in such a way, that the search results contain the most relevant information with respect to the users information need.
 This can only be done if the search result quality is evaluated constantly across a representative test suite of typical user queries, so that improvements in the rankings for one particular query doesn't negatively effect the ranking for other types of queries.
 
 In order to get started with search quality evaluation, three basic things are needed:
@@ -28,7 +28,7 @@ In order to get started with search quality evaluation, three basic things are n
 . a collection of typical search requests that users enter into your system
 . a set of document ratings that judge the documents relevance with respect to a search request+
   It is important to note that one set of document ratings is needed per test query, and that
-  the relevance judgements are based on the _information_need_ of the user that entered the query.
+  the relevance judgements are based on the information need of the user that entered the query.
 
 The ranking evaluation API provides a convenient way to use this information in a ranking evaluation request to calculate different search evaluation metrics. This gives a first estimation of your overall search quality and give you a measurement to optimize against when fine-tuning various aspect of the query generation in your application.

From 13e19e7428568e8a71dd42618bbbdd807881f131 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Wed, 28 Mar 2018 18:03:34 +0200
Subject: [PATCH 09/68] Allow _update and upsert to read from the transaction log (#29264)

We historically removed reading from the transaction log to get consistent
results from _GET calls. There was also the motivation that the
read-modify-update principle we apply should not be hidden from the user. We
still agree on the fact that we should not hide these aspects, but the impact
on updates is quite significant, especially if the same document is updated
before it's written to disk and made searchable. This change adds back the
ability to read from the transaction log, but only for update calls. Calls to
the _GET API will always do a refresh if necessary to return consistent
results, i.e. if stored fields or DocValues fields are requested.
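As a usage sketch (not part of the patch; the two-boolean constructor is taken
from the Engine.java hunk below, the call sites are illustrative), the
distinction looks like this:

    // Sketch only: the update path may be served straight from the translog,
    // while an API-level GET stays reader-backed and refreshes if necessary.
    Engine.Get updateGet = new Engine.Get(true, true, type, id, uidTerm) // realtime, readFromTranslog
        .version(request.version())
        .versionType(request.versionType());
    Engine.Get apiGet = new Engine.Get(true, false, type, id, uidTerm);  // realtime, reader-backed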
Closes #26802 --- .../explain/TransportExplainAction.java | 4 +- .../action/update/UpdateHelper.java | 6 +- .../uid/PerThreadIDVersionAndSeqNoLookup.java | 2 +- .../lucene/uid/VersionsAndSeqNoResolver.java | 9 +- .../elasticsearch/index/engine/Engine.java | 8 +- .../index/engine/InternalEngine.java | 36 ++- .../index/engine/TranslogLeafReader.java | 237 ++++++++++++++++++ .../index/engine/TranslogVersionValue.java | 71 ++++++ .../index/engine/VersionValue.java | 11 +- .../index/get/ShardGetService.java | 22 +- .../index/termvectors/TermVectorsService.java | 4 +- .../index/translog/BaseTranslogReader.java | 9 + .../index/translog/Translog.java | 27 ++ .../index/translog/TranslogSnapshot.java | 1 - .../index/engine/InternalEngineTests.java | 26 +- .../index/shard/IndexShardTests.java | 6 +- .../index/shard/RefreshListenersTests.java | 4 +- .../index/shard/ShardGetServiceTests.java | 132 ++++++++++ .../index/translog/TranslogTests.java | 23 +- .../index/engine/EngineTestCase.java | 2 +- .../index/shard/IndexShardTestCase.java | 7 +- 21 files changed, 602 insertions(+), 45 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java create mode 100644 server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java create mode 100644 server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 5b20b848f0b04..18c1ea41e95b9 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -112,13 +112,13 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId if (uidTerm == null) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } - result = context.indexShard().get(new Engine.Get(false, request.type(), request.id(), uidTerm)); + result = context.indexShard().get(new Engine.Get(false, false, request.type(), request.id(), uidTerm)); if (!result.exists()) { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false); } context.parsedQuery(context.getQueryShardContext().toQuery(request.query())); context.preProcess(true); - int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase; + int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().docBase; Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); for (RescoreContext ctx : context.rescore()) { Rescorer rescorer = ctx.rescorer(); diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 4ee49f2407b5d..ab10aa710cce6 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -47,7 +47,6 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; @@ -71,9 +70,8 @@ public UpdateHelper(Settings settings, ScriptService scriptService) { * Prepares an update request 
by converting it into an index or delete request or an update response (no action). */ public Result prepare(UpdateRequest request, IndexShard indexShard, LongSupplier nowInMillis) { - final GetResult getResult = indexShard.getService().get(request.type(), request.id(), - new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, - true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE); + final GetResult getResult = indexShard.getService().getForUpdate(request.type(), request.id(), request.version(), + request.versionType()); return prepare(indexShard.shardId(), request, getResult, nowInMillis); } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java index f8ccd827019a4..38fcdfe5f1b62 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDVersionAndSeqNoLookup.java @@ -100,7 +100,7 @@ public DocIdAndVersion lookupVersion(BytesRef id, LeafReaderContext context) if (versions.advanceExact(docID) == false) { throw new IllegalArgumentException("Document [" + docID + "] misses the [" + VersionFieldMapper.NAME + "] field"); } - return new DocIdAndVersion(docID, versions.longValue(), context); + return new DocIdAndVersion(docID, versions.longValue(), context.reader(), context.docBase); } else { return null; } diff --git a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java index 126e4dee51cc2..9db7e3716d51a 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/uid/VersionsAndSeqNoResolver.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.lucene.uid; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.Term; @@ -97,12 +98,14 @@ private VersionsAndSeqNoResolver() { public static class DocIdAndVersion { public final int docId; public final long version; - public final LeafReaderContext context; + public final LeafReader reader; + public final int docBase; - DocIdAndVersion(int docId, long version, LeafReaderContext context) { + public DocIdAndVersion(int docId, long version, LeafReader reader, int docBase) { this.docId = docId; this.version = version; - this.context = context; + this.reader = reader; + this.docBase = docBase; } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 1ca4468539da1..6cc8c4197dcd5 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1232,14 +1232,16 @@ public static class Get { private final boolean realtime; private final Term uid; private final String type, id; + private final boolean readFromTranslog; private long version = Versions.MATCH_ANY; private VersionType versionType = VersionType.INTERNAL; - public Get(boolean realtime, String type, String id, Term uid) { + public Get(boolean realtime, boolean readFromTranslog, String type, String id, Term uid) { this.realtime = realtime; 
this.type = type;
             this.id = id;
             this.uid = uid;
+            this.readFromTranslog = readFromTranslog;
         }
 
         public boolean realtime() {
@@ -1275,6 +1277,10 @@ public Get versionType(VersionType versionType) {
             this.versionType = versionType;
             return this;
         }
+
+        public boolean isReadFromTranslog() {
+            return readFromTranslog;
+        }
     }
 
     public static class GetResult implements Releasable {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 0fda2f04ac5a4..864385667f5fe 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -78,6 +78,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -145,6 +146,7 @@ public class InternalEngine extends Engine {
      * being indexed/deleted.
      */
     private final AtomicLong writingBytes = new AtomicLong();
+    private final AtomicBoolean trackTranslogLocation = new AtomicBoolean(false);
 
     @Nullable
     private final String historyUUID;
@@ -558,6 +560,27 @@ public GetResult get(Get get, BiFunction search
                         throw new VersionConflictEngineException(shardId, get.type(), get.id(),
                             get.versionType().explainConflictForReads(versionValue.version, get.version()));
                     }
+                    if (get.isReadFromTranslog()) {
+                        // this is only used for updates - API _GET calls will always read from a reader for consistency
+                        // the update call doesn't need the consistency since it's source only + _parent but parent can go away in 7.0
+                        if (versionValue.getLocation() != null) {
+                            try {
+                                Translog.Operation operation = translog.readOperation(versionValue.getLocation());
+                                if (operation != null) {
+                                    // in the case of an already pruned translog generation we might get null here - yet very unlikely
+                                    TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig
+                                        .getIndexSettings().getIndexVersionCreated());
+                                    return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader)),
+                                        new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0));
+                                }
+                            } catch (IOException e) {
+                                maybeFailEngine("realtime_get", e); // let's check if the translog has failed with a tragic event
+                                throw new EngineException(shardId, "failed to read operation from translog", e);
+                            }
+                        } else {
+                            trackTranslogLocation.set(true);
+                        }
+                    }
                     refresh("realtime_get", SearcherScope.INTERNAL);
                 }
                 scope = SearcherScope.INTERNAL;
@@ -790,6 +813,10 @@ public IndexResult index(Index index) throws IOException {
                     }
                     indexResult.setTranslogLocation(location);
                 }
+                if (plan.indexIntoLucene && indexResult.hasFailure() == false) {
+                    versionMap.maybePutUnderLock(index.uid().bytes(),
+                        getVersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm(), indexResult.getTranslogLocation()));
+                }
                 if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) {
                     localCheckpointTracker.markSeqNoAsCompleted(indexResult.getSeqNo());
                 }
@@ -916,8 +943,6 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan)
                 assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
                 index(index.docs(), indexWriter);
             }
-            versionMap.maybePutUnderLock(index.uid().bytes(),
-                new VersionValue(plan.versionForIndexing, plan.seqNoForIndexing, index.primaryTerm()));
             return new IndexResult(plan.versionForIndexing,
plan.seqNoForIndexing, plan.currentNotFoundOrDeleted); } catch (Exception ex) { if (indexWriter.getTragicException() == null) { @@ -941,6 +966,13 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan) } } + private VersionValue getVersionValue(long version, long seqNo, long term, Translog.Location location) { + if (location != null && trackTranslogLocation.get()) { + return new TranslogVersionValue(location, version, seqNo, term); + } + return new VersionValue(version, seqNo, term); + } + /** * returns true if the indexing operation may have already be processed by this engine. * Note that it is OK to rarely return true even if this is not the case. However a `false` diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java new file mode 100644 index 0000000000000..628bfd4826935 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.BinaryDocValues; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.LeafMetaData; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.PointValues; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.index.fielddata.AbstractSortedDocValues; +import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.index.translog.Translog; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; + +/** + * Internal class that mocks a single doc read from the transaction log as a leaf reader. 
+ */ +final class TranslogLeafReader extends LeafReader { + + private final Translog.Index operation; + private static final FieldInfo FAKE_SOURCE_FIELD + = new FieldInfo(SourceFieldMapper.NAME, 1, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ROUTING_FIELD + = new FieldInfo(RoutingFieldMapper.NAME, 2, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_ID_FIELD + = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private static final FieldInfo FAKE_UID_FIELD + = new FieldInfo(UidFieldMapper.NAME, 4, false, false, false, IndexOptions.NONE, DocValuesType.NONE, -1, Collections.emptyMap(), + 0,0); + private final Version indexVersionCreated; + + TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) { + this.operation = operation; + this.indexVersionCreated = indexVersionCreated; + } + @Override + public CacheHelper getCoreCacheHelper() { + throw new UnsupportedOperationException(); + } + + @Override + public Terms terms(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public BinaryDocValues getBinaryDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedDocValues getSortedDocValues(String field) { + // TODO this can be removed in 7.0 and upwards; we don't support the parent field anymore + if (field.startsWith(ParentFieldMapper.NAME + "#") && operation.parent() != null) { + return new AbstractSortedDocValues() { + @Override + public int docID() { + return 0; + } + + private final BytesRef term = new BytesRef(operation.parent()); + private int ord; + @Override + public boolean advanceExact(int docID) { + if (docID != 0) { + throw new IndexOutOfBoundsException("no such doc ID: " + docID); + } + ord = 0; + return true; + } + + @Override + public int ordValue() { + return ord; + } + + @Override + public BytesRef lookupOrd(int ord) { + if (ord == 0) { + return term; + } + return null; + } + + @Override + public int getValueCount() { + return 1; + } + }; + } + if (operation.parent() == null) { + return null; + } + assert false : "unexpected field: " + field; + return null; + } + + @Override + public SortedNumericDocValues getSortedNumericDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public SortedSetDocValues getSortedSetDocValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public NumericDocValues getNormValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public FieldInfos getFieldInfos() { + throw new UnsupportedOperationException(); + } + + @Override + public Bits getLiveDocs() { + throw new UnsupportedOperationException(); + } + + @Override + public PointValues getPointValues(String field) { + throw new UnsupportedOperationException(); + } + + @Override + public void checkIntegrity() { + + } + + @Override + public LeafMetaData getMetaData() { + throw new UnsupportedOperationException(); + } + + @Override + public Fields getTermVectors(int docID) { + throw new UnsupportedOperationException(); + } + + @Override + public int numDocs() { + return 1; + } + + @Override + public int maxDoc() { + return 1; + } + + 
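// Note: this reader exposes exactly one live document (doc ID 0); numDocs() and maxDoc() above are + // therefore fixed at 1, and document(int, StoredFieldVisitor) below rejects any other doc ID. + + 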
@Override + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + if (docID != 0) { + throw new IllegalArgumentException("no such doc ID " + docID); + } + if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) { + assert operation.source().toBytesRef().offset == 0; + assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length; + visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes); + } + if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8)); + } + if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) { + final byte[] id; + if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) { + BytesRef bytesRef = Uid.encodeId(operation.id()); + id = new byte[bytesRef.length]; + System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length); + } else { // TODO this can go away in 7.0 after backport + id = operation.id().getBytes(StandardCharsets.UTF_8); + } + visitor.stringField(FAKE_ID_FIELD, id); + } + if (visitor.needsField(FAKE_UID_FIELD) == StoredFieldVisitor.Status.YES) { + visitor.stringField(FAKE_UID_FIELD, Uid.createUid(operation.type(), operation.id()).getBytes(StandardCharsets.UTF_8)); + } + } + + @Override + protected void doClose() { + + } + + @Override + public CacheHelper getReaderCacheHelper() { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java new file mode 100644 index 0000000000000..67415ea6139a6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogVersionValue.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.index.translog.Translog; + +import java.util.Objects; + +final class TranslogVersionValue extends VersionValue { + + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(TranslogVersionValue.class); + + private final Translog.Location translogLocation; + + TranslogVersionValue(Translog.Location translogLocation, long version, long seqNo, long term) { + super(version, seqNo, term); + this.translogLocation = translogLocation; + } + + @Override + public long ramBytesUsed() { + return RAM_BYTES_USED; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + TranslogVersionValue that = (TranslogVersionValue) o; + return Objects.equals(translogLocation, that.translogLocation); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), translogLocation); + } + + @Override + public String toString() { + return "TranslogVersionValue{" + + "version=" + version + + ", seqNo=" + seqNo + + ", term=" + term + + ", location=" + translogLocation + + '}'; + } + + @Override + public Translog.Location getLocation() { + return translogLocation; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java index e2a2614d6c102..d63306486732e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionValue.java @@ -21,6 +21,8 @@ import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.index.translog.Translog; import java.util.Collection; import java.util.Collections; @@ -81,9 +83,16 @@ public int hashCode() { public String toString() { return "VersionValue{" + "version=" + version + - ", seqNo=" + seqNo + ", term=" + term + '}'; } + + /** + * Returns the translog location for this version value or null. This is optional and might not be tracked all the time. 
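+ * <p> + * The base implementation returns {@code null}; {@code TranslogVersionValue} overrides it to return the + * location captured when the operation was indexed. 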
+ */ + @Nullable + public Translog.Location getLocation() { + return null; + } } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index dcd18c8f313f9..a6c8dbf53b395 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -42,6 +42,7 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.IndexShard; @@ -75,10 +76,15 @@ public GetStats stats() { } public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false); + } + + private GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { currentMetric.inc(); try { long now = System.nanoTime(); - GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext); + GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, readFromTranslog); if (getResult.isExists()) { existsMetric.inc(System.nanoTime() - now); @@ -91,6 +97,11 @@ public GetResult get(String type, String id, String[] gFields, boolean realtime, } } + public GetResult getForUpdate(String type, String id, long version, VersionType versionType) { + return get(type, id, new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME}, true, version, versionType, + FetchSourceContext.FETCH_SOURCE, true); + } + /** * Returns {@link GetResult} based on the specified {@link org.elasticsearch.index.engine.Engine.GetResult} argument. * This method basically loads specified fields for the associated document in the engineGetResult. 
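 [Illustration, not part of the diff: a caller on the update path is expected to use the new helper roughly as follows; the indexShard variable and the concrete arguments are assumptions. GetResult result = indexShard.getService().getForUpdate("test", "1", Versions.MATCH_ANY, VersionType.INTERNAL); /* realtime get, served from the translog when possible */ if (result.isExists()) { byte[] source = result.source(); /* merge the partial update into this source */ } ] 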
@@ -137,7 +148,8 @@ private FetchSourceContext normalizeFetchSourceContent(@Nullable FetchSourceCont return FetchSourceContext.DO_NOT_FETCH_SOURCE; } - private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, + FetchSourceContext fetchSourceContext, boolean readFromTranslog) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); final Collection types; if (type == null || type.equals("_all")) { @@ -150,7 +162,7 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea for (String typeX : types) { Term uidTerm = mapperService.createUidTerm(typeX, id); if (uidTerm != null) { - get = indexShard.get(new Engine.Get(realtime, typeX, id, uidTerm) + get = indexShard.get(new Engine.Get(realtime, readFromTranslog, typeX, id, uidTerm) .version(version).versionType(versionType)); if (get.exists()) { type = typeX; @@ -180,7 +192,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] FieldsVisitor fieldVisitor = buildFieldsVisitors(gFields, fetchSourceContext); if (fieldVisitor != null) { try { - docIdAndVersion.context.reader().document(docIdAndVersion.docId, fieldVisitor); + docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor); } catch (IOException e) { throw new ElasticsearchException("Failed to get type [" + type + "] and id [" + id + "]", e); } @@ -197,7 +209,7 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] DocumentMapper docMapper = mapperService.documentMapper(type); if (docMapper.parentFieldMapper().active()) { - String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.context.reader(), docIdAndVersion.docId); + String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader, docIdAndVersion.docId); if (fields == null) { fields = new HashMap<>(1); } diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index d527fa83501b3..573e75d78060a 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -85,7 +85,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ termVectorsResponse.setExists(false); return termVectorsResponse; } - Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), request.type(), request.id(), uidTerm) + Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm) .version(request.version()).versionType(request.versionType())); Fields termVectorsByField = null; @@ -114,7 +114,7 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ /* or from an existing document */ else if (docIdAndVersion != null) { // fields with stored term vectors - termVectorsByField = docIdAndVersion.context.reader().getTermVectors(docIdAndVersion.docId); + termVectorsByField = docIdAndVersion.reader.getTermVectors(docIdAndVersion.docId); Set selectedFields = request.selectedFields(); // generate tvs for fields where analyzer is overridden if (selectedFields == null && 
request.perFieldAnalyzer() != null) { diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index d86c4491b63e9..14ee8ecb9b3c0 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -126,4 +126,13 @@ public Path path() { public long getLastModifiedTime() throws IOException { return Files.getLastModifiedTime(path).toMillis(); } + + /** + * Reads a single operation from the given location. + */ + Translog.Operation read(Translog.Location location) throws IOException { + assert location.generation == this.generation : "generation mismatch expected: " + generation + " got: " + location.generation; + ByteBuffer buffer = ByteBuffer.allocate(location.size); + return read(checksummedStream(buffer, location.translogLocation, location.size, null)); + } } diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 0043472b72f7c..62e47d08ded54 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -571,6 +571,33 @@ public Snapshot newSnapshotFromGen(long minGeneration) throws IOException { } } + /** + * Reads and returns the operation from the given location if the generation it references is still available. Otherwise + * this method will return null. + */ + public Operation readOperation(Location location) throws IOException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + if (location.generation < getMinFileGeneration()) { + return null; + } + if (current.generation == location.generation) { + // no need to fsync here - the read operation will ensure that buffers are written to disk + // if they are still in RAM and we are reading at that position + return current.read(location); + } else { + // read backwards - it's likely we need to read one that is recent + for (int i = readers.size() - 1; i >= 0; i--) { + TranslogReader translogReader = readers.get(i); + if (translogReader.generation == location.generation) { + return translogReader.read(location); + } + } + } + } + return null; + } + public Snapshot newSnapshotFromMinSeqNo(long minSeqNo) throws IOException { try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 656772fa8169d..5f6d14e192eb8 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -104,5 +104,4 @@ public String toString() { ", reusableBuffer=" + reusableBuffer + '}'; } - } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 1ecb1829234ab..bba05401d4155 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1238,7 +1238,7 @@ public void testVersionedUpdate() throws IOException { Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED); Engine.IndexResult 
indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(1, get.version()); } @@ -1246,7 +1246,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(2, get.version()); } @@ -1254,7 +1254,7 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { assertEquals(3, get.version()); } @@ -1765,7 +1765,7 @@ public void testVersioningPromotedReplica() throws IOException { assertOpsOnReplica(replicaOps, replicaEngine, true); final int opsOnPrimary = assertOpsOnPrimary(primaryOps, finalReplicaVersion, deletedOnReplica, replicaEngine); final long currentSeqNo = getSequenceID(replicaEngine, - new Engine.Get(false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); + new Engine.Get(false, false, "type", lastReplicaOp.uid().text(), lastReplicaOp.uid())).v1(); try (Searcher searcher = engine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); @@ -1830,9 +1830,9 @@ class OpAndVersion { throw new AssertionError(e); } for (int op = 0; op < opsPerThread; op++) { - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString())); String removed = op % 3 == 0 && values.size() > 0 ? 
values.remove(0) : null; String added = "v_" + idGenerator.incrementAndGet(); @@ -1872,9 +1872,9 @@ class OpAndVersion { assertTrue(op.added + " should not exist", exists); } - try (Engine.GetResult get = engine.get(new Engine.Get(true, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); - get.docIdAndVersion().context.reader().document(get.docIdAndVersion().docId, visitor); + get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString())); assertThat(currentValues, equalTo(new HashSet<>(values))); } @@ -2275,7 +2275,7 @@ public void testEnableGcDeletes() throws Exception { engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document (we never indexed uid=2): - getResult = engine.get(new Engine.Get(true, "type", "2", newUid("2")), searcherFactory); + getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: @@ -3450,7 +3450,7 @@ public void afterRefresh(boolean didRefresh) throws IOException { } public void testSequenceIDs() throws Exception { - Tuple seqID = getSequenceID(engine, new Engine.Get(false, "type", "2", newUid("1"))); + Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1"))); // Non-existent doc returns no seqnum and no primary term assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); @@ -3665,7 +3665,7 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio } assertThat(engine.getLocalCheckpointTracker().getCheckpoint(), equalTo(expectedLocalCheckpoint)); - try (Engine.GetResult result = engine.get(new Engine.Get(true, "type", "2", uid), searcherFactory)) { + try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } } @@ -4454,14 +4454,14 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup CountDownLatch awaitStarted = new CountDownLatch(1); Thread thread = new Thread(() -> { awaitStarted.countDown(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc3.type(), doc3.id(), doc3.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()), engine::acquireSearcher)) { assertTrue(getResult.exists()); } }); thread.start(); awaitStarted.await(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, doc.type(), doc.id(), doc.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()), engine::acquireSearcher)) { assertFalse(getResult.exists()); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 941a967355345..7aa597c2d4d42 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1185,7 +1185,7 @@ public 
void testRefreshMetric() throws IOException { } long refreshCount = shard.refreshStats().getTotal(); indexDoc(shard, "test", "test"); - try (Engine.GetResult ignored = shard.get(new Engine.Get(true, "test", "test", + try (Engine.GetResult ignored = shard.get(new Engine.Get(true, false, "test", "test", new Term(IdFieldMapper.NAME, Uid.encodeId("test"))))) { assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount+1)); } @@ -1833,7 +1833,7 @@ public void testSearcherWrapperIsUsed() throws IOException { indexDoc(shard, "test", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - Engine.GetResult getResult = shard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); getResult.release(); @@ -1867,7 +1867,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits, 1); } - getResult = newShard.get(new Engine.Get(false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + getResult = newShard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 0609477dda8e5..1bd98cd1c9e69 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -323,12 +323,12 @@ public void testLotsOfThreads() throws Exception { } listener.assertNoError(); - Engine.Get get = new Engine.Get(false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); + Engine.Get get = new Engine.Get(false, false, "test", threadId, new Term(IdFieldMapper.NAME, threadId)); try (Engine.GetResult getResult = engine.get(get, engine::acquireSearcher)) { assertTrue("document not found", getResult.exists()); assertEquals(iteration, getResult.version()); SingleFieldsVisitor visitor = new SingleFieldsVisitor("test"); - getResult.docIdAndVersion().context.reader().document(getResult.docIdAndVersion().docId, visitor); + getResult.docIdAndVersion().reader.document(getResult.docIdAndVersion().docId, visitor); assertEquals(Arrays.asList(testFieldValue), visitor.fields().get("test")); } } catch (Exception t) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java new file mode 100644 index 0000000000000..c626f2d18522c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -0,0 +1,132 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.index.shard; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.ParentFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +public class ShardGetServiceTests extends IndexShardTestCase { + + public void testGetForUpdate() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, "foobar", null); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + 
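// verify that source and routing survive the round-trip through the update helper + 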
assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(RoutingFieldMapper.NAME).getValue()); + + closeShards(primary); + } + + public void testGetForUpdateWithParentField() throws IOException { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.version.created", Version.V_5_6_0) // for parent field mapper + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("parent", "{ \"properties\": {}}") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}, \"_parent\": { \"type\": \"parent\"}}") + .settings(settings) + .primaryTerm(0, 1).build(); + IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null); + recoverShardFromStore(primary); + Engine.IndexResult test = indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet = primary.getService().getForUpdate("test", "0", test.getVersion(), VersionType.INTERNAL); + assertFalse(testGet.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals(new String(testGet.source(), StandardCharsets.UTF_8), "{\"foo\" : \"bar\"}"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we refreshed + } + + Engine.IndexResult test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + GetResult testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 1); // we read from the translog + } + primary.getEngine().refresh("test"); + try (Engine.Searcher searcher = primary.getEngine().acquireSearcher("test", Engine.SearcherScope.INTERNAL)) { + assertEquals(searcher.reader().maxDoc(), 2); + } + + // now again from the reader + test1 = indexDoc(primary, "test", "1", "{\"foo\" : \"baz\"}", XContentType.JSON, null, "foobar"); + assertTrue(primary.getEngine().refreshNeeded()); + testGet1 = primary.getService().getForUpdate("test", "1", test1.getVersion(), VersionType.INTERNAL); + assertEquals(new String(testGet1.source(), StandardCharsets.UTF_8), "{\"foo\" : \"baz\"}"); + assertTrue(testGet1.getFields().containsKey(ParentFieldMapper.NAME)); + assertFalse(testGet1.getFields().containsKey(RoutingFieldMapper.NAME)); + assertEquals("foobar", testGet1.getFields().get(ParentFieldMapper.NAME).getValue()); + + closeShards(primary); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 
2317d8fb0d8bf..61e5cdcfd953a 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -235,9 +235,9 @@ private TranslogConfig getTranslogConfig(final Path path, final Settings setting return new TranslogConfig(shardId, path, indexSettings, NON_RECYCLING_INSTANCE, bufferSize); } - private void addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { + private Location addToTranslogAndList(Translog translog, List list, Translog.Operation op) throws IOException { list.add(op); - translog.add(op); + return translog.add(op); } public void testIdParsingFromFile() { @@ -579,6 +579,19 @@ public void testSnapshot() throws IOException { } } + public void testReadLocation() throws IOException { + ArrayList ops = new ArrayList<>(); + ArrayList locs = new ArrayList<>(); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1}))); + int i = 0; + for (Translog.Operation op : ops) { + assertEquals(op, translog.readOperation(locs.get(i++))); + } + assertNull(translog.readOperation(new Location(100, 0, 0))); + } + public void testSnapshotWithNewTranslog() throws IOException { List toClose = new ArrayList<>(); try { @@ -689,6 +702,9 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable { Translog.Operation op = snapshot.next(); assertNotNull(op); Translog.Operation expectedOp = locationOperation.operation; + if (randomBoolean()) { + assertEquals(expectedOp, translog.readOperation(locationOperation.location)); + } assertEquals(expectedOp.opType(), op.opType()); switch (op.opType()) { case INDEX: @@ -1643,6 +1659,9 @@ public void run() { Translog.Location loc = add(op); writtenOperations.add(new LocationOperation(op, loc)); + if (rarely()) { // lets verify we can concurrently read this + assertEquals(op, translog.readOperation(loc)); + } afterAdd(); } } catch (Exception t) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 8a9ad3d2a76e1..667adf9d990cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -471,7 +471,7 @@ protected Term newUid(ParsedDocument doc) { } protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { - return new Engine.Get(realtime, doc.type(), doc.id(), newUid(doc)); + return new Engine.Get(realtime, false, doc.type(), doc.id(), newUid(doc)); } protected Engine.Index indexForDoc(ParsedDocument doc) { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 6d6cc36d78b1b..2656855b9fd15 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -548,12 +548,15 @@ protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id) } protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source) throws IOException { - return 
indexDoc(shard, type, id, source, XContentType.JSON); + return indexDoc(shard, type, id, source, XContentType.JSON, null, null); } - protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType) + protected Engine.IndexResult indexDoc(IndexShard shard, String type, String id, String source, XContentType xContentType, + String routing, String parentId) throws IOException { SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source), xContentType); + sourceToParse.routing(routing); + sourceToParse.parent(parentId); if (shard.routingEntry().primary()) { final Engine.IndexResult result = shard.applyIndexOperationOnPrimary(Versions.MATCH_ANY, VersionType.INTERNAL, sourceToParse, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, getMappingUpdater(shard, type)); From 4ef3de40bca75f3b745c8d7adf60d8307a479bf2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 28 Mar 2018 16:25:01 -0400 Subject: [PATCH 10/68] Fix handling of bad requests (#29249) Today we have a few problems with how we handle bad requests: - handling requests with bad encoding - handling requests with invalid values for filter_path/pretty/human - handling requests with a garbage Content-Type header There are two problems: - in every case, we give an empty response to the client - in most cases, we leak the byte buffer backing the request! These problems are caused by a broader problem: poor handling when preparing the request for dispatch, or when preparing the channel that the response will be written to. This commit addresses these issues by taking a unified approach to all of them that ensures that: - we respond to the client with the exception that blew us up - we do not leak the byte buffer backing the request --- .../http/netty4/Netty4HttpRequest.java | 38 +++++- .../http/netty4/Netty4HttpRequestHandler.java | 125 +++++++++++++++--- .../http/netty4/Netty4BadRequestTests.java | 108 +++++++++++++++ .../http/netty4/Netty4HttpChannelTests.java | 3 +- .../rest/Netty4BadRequestIT.java | 26 ++++ .../rest/AbstractRestChannel.java | 7 + .../org/elasticsearch/rest/RestRequest.java | 100 +++++++++----- .../rest/BytesRestResponseTests.java | 43 +++--- .../rest/RestControllerTests.java | 5 +- .../elasticsearch/rest/RestRequestTests.java | 23 +++- .../test/rest/FakeRestRequest.java | 4 +- 11 files changed, 398 insertions(+), 84 deletions(-) create mode 100644 modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java index f3099db08e992..5194c762b7e43 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequest.java @@ -23,7 +23,6 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; - import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -45,6 +44,15 @@ public class Netty4HttpRequest extends RestRequest { private final Channel channel; private final BytesReference content; + /** + * Construct a new request. 
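+ * <p>The request content, when readable, is captured from the underlying Netty request; otherwise it is empty. 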
+ * + * @param xContentRegistry the content registry + * @param request the underlying request + * @param channel the channel for the request + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ Netty4HttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request, Channel channel) { super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); this.request = request; @@ -56,6 +64,34 @@ public class Netty4HttpRequest extends RestRequest { } } + /** + * Construct a new request. In contrast to + * {@link Netty4HttpRequest#Netty4HttpRequest(NamedXContentRegistry, Map, String, FullHttpRequest, Channel)}, the URI is not decoded so + * this constructor will not throw a {@link BadParameterException}. + * + * @param xContentRegistry the content registry + * @param params the parameters for the request + * @param uri the path for the request + * @param request the underlying request + * @param channel the channel for the request + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed + */ + Netty4HttpRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String uri, + final FullHttpRequest request, + final Channel channel) { + super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + this.request = request; + this.channel = channel; + if (request.content().isReadable()) { + this.content = Netty4Utils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + } + public FullHttpRequest request() { return this.request; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 6da0f5433bae6..1fd18b2a016d7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -20,15 +20,21 @@ package org.elasticsearch.http.netty4; import io.netty.buffer.Unpooled; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; +import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; +import java.util.Collections; + @ChannelHandler.Sharable class Netty4HttpRequestHandler extends SimpleChannelInboundHandler { @@ -56,32 +62,113 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except request = (FullHttpRequest) msg; } - final FullHttpRequest copy = + boolean success = false; + try { + + final FullHttpRequest copy = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. 
However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. + */ + final Netty4HttpRequest httpRequest; + { + Netty4HttpRequest innerHttpRequest; + try { + innerHttpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copy, ctx.channel(), badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copy, ctx.channel()); + } + httpRequest = innerHttpRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. + */ + final Netty4HttpChannel channel; + { + Netty4HttpChannel innerChannel; + try { + innerChannel = + new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final Netty4HttpRequest innerRequest = + new Netty4HttpRequest( + serverTransport.xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copy.uri(), + copy, + ctx.channel()); + innerChannel = + new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } + channel = innerChannel; + } + + if (request.decoderResult().isFailure()) { + serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + serverTransport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + serverTransport.dispatchRequest(httpRequest, channel); + } + success = true; + } finally { + // the request is otherwise released in case of dispatch + if (success == false && pipelinedRequest != null) { + pipelinedRequest.release(); + } + } + } + + private Netty4HttpRequest requestWithoutContentTypeHeader( + final FullHttpRequest request, final Channel channel, final Exception badRequestCause) { + final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove("Content-Type"); + final FullHttpRequest requestWithoutContentTypeHeader = new DefaultFullHttpRequest( request.protocolVersion(), request.method(), request.uri(), - Unpooled.copiedBuffer(request.content()), - request.headers(), - request.trailingHeaders()); - final Netty4HttpRequest httpRequest; + request.content(), + headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again + request.trailingHeaders()); // Content-Type can 
not be a trailing header try { - httpRequest = new Netty4HttpRequest(serverTransport.xContentRegistry, copy, ctx.channel()); - } catch (Exception ex) { - if (pipelinedRequest != null) { - pipelinedRequest.release(); - } - throw ex; + return new Netty4HttpRequest(serverTransport.xContentRegistry, requestWithoutContentTypeHeader, channel); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return requestWithoutParameters(requestWithoutContentTypeHeader, channel); } - final Netty4HttpChannel channel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + } - if (request.decoderResult().isSuccess()) { - serverTransport.dispatchRequest(httpRequest, channel); - } else { - assert request.decoderResult().isFailure(); - serverTransport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); - } + private Netty4HttpRequest requestWithoutParameters(final FullHttpRequest request, final Channel channel) { + // remove all parameters as at least one is incorrectly encoded + return new Netty4HttpRequest(serverTransport.xContentRegistry, Collections.emptyMap(), request.uri(), request, channel); } @Override diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java new file mode 100644 index 0000000000000..094f339059876 --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.netty4; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collection; +import java.util.Collections; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class Netty4BadRequestTests extends ESTestCase { + + private NetworkService networkService; + private MockBigArrays bigArrays; + private ThreadPool threadPool; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + threadPool = new TestThreadPool("test"); + } + + @After + public void shutdown() throws Exception { + terminate(threadPool); + } + + public void testBadParameterEncoding() throws Exception { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + fail(); + } + + @Override + public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) { + try { + final Exception e = cause instanceof Exception ? 
(Exception) cause : new ElasticsearchException(cause); + channel.sendResponse(new BytesRestResponse(channel, RestStatus.BAD_REQUEST, e)); + } catch (final IOException e) { + throw new UncheckedIOException(e); + } + } + }; + + try (HttpServerTransport httpServerTransport = + new Netty4HttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + httpServerTransport.start(); + final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses()); + + try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) { + final Collection responses = + nettyHttpClient.get(transportAddress.address(), "/_cluster/settings?pretty=%"); + assertThat(responses, hasSize(1)); + assertThat(responses.iterator().next().status().code(), equalTo(400)); + final Collection responseBodies = Netty4HttpClient.returnHttpResponseBodies(responses); + assertThat(responseBodies, hasSize(1)); + assertThat(responseBodies.iterator().next(), containsString("\"type\":\"bad_parameter_exception\"")); + assertThat( + responseBodies.iterator().next(), + containsString( + "\"reason\":\"java.lang.IllegalArgumentException: unterminated escape sequence at end of string: %\"")); + } + } + } + +} diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index e9de4ef50a5a4..918e98fd2e7c0 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -330,7 +330,8 @@ private FullHttpResponse executeRequest(final Settings settings, final String or } httpRequest.headers().add(HttpHeaderNames.HOST, host); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); - final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + final Netty4HttpRequest request = + new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); Netty4HttpChannel channel = new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index ae2449d2820d1..028770ed22469 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.rest; +import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Setting; @@ -74,4 +75,29 @@ public void testBadRequest() throws IOException { assertThat(e, hasToString(containsString("too_long_frame_exception"))); assertThat(e, hasToString(matches("An HTTP line is larger than \\d+ bytes"))); } + + public void testInvalidParameterValue() throws IOException { + final ResponseException e = expectThrows( + ResponseException.class, + () -> client().performRequest("GET", "/_cluster/settings", Collections.singletonMap("pretty", "neither-true-nor-false"))); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), 
equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("illegal_argument_exception")); + assertThat(map.get("reason"), equalTo("Failed to parse value [neither-true-nor-false] as only [true] or [false] are allowed.")); + } + + public void testInvalidHeaderValue() throws IOException { + final BasicHeader header = new BasicHeader("Content-Type", "\t"); + final ResponseException e = + expectThrows(ResponseException.class, () -> client().performRequest("GET", "/_cluster/settings", header)); + final Response response = e.getResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(400)); + final ObjectPath objectPath = ObjectPath.createFromResponse(response); + final Map map = objectPath.evaluate("error"); + assertThat(map.get("type"), equalTo("content_type_header_exception")); + assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java index 6c84c1bb963fe..d376b65ef2d88 100644 --- a/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java +++ b/server/src/main/java/org/elasticsearch/rest/AbstractRestChannel.java @@ -48,6 +48,13 @@ public abstract class AbstractRestChannel implements RestChannel { private BytesStreamOutput bytesOut; + /** + * Construct a channel for handling the request. + * + * @param request the request + * @param detailedErrorsEnabled if detailed errors should be reported to the channel + * @throws IllegalArgumentException if parsing the pretty or human parameters fails + */ protected AbstractRestChannel(RestRequest request, boolean detailedErrorsEnabled) { this.request = request; this.detailedErrorsEnabled = detailedErrorsEnabled; diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index e5b3cfa67e5a9..bd46a20f31231 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -64,49 +64,69 @@ public abstract class RestRequest implements ToXContent.Params { private final SetOnce xContentType = new SetOnce<>(); /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param uri the URI of the request that potentially contains request parameters - * @param headers a map of the headers. This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. 
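+     * For example, a URI such as {@code /_cluster/settings?pretty=true} is split here into the raw path
+     * {@code /_cluster/settings} and the decoded parameter map ({@code pretty=true}); this illustrative
+     * example is an assumption based on the params(uri)/path(uri) helpers below.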
+ * + * @param xContentRegistry the content registry + * @param uri the raw URI that will be parsed into the path and the parameters + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws BadParameterException if the parameters can not be decoded + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, String uri, Map> headers) { - this.xContentRegistry = xContentRegistry; + public RestRequest(final NamedXContentRegistry xContentRegistry, final String uri, final Map> headers) { + this(xContentRegistry, params(uri), path(uri), headers); + } + + private static Map params(final String uri) { final Map params = new HashMap<>(); - int pathEndPos = uri.indexOf('?'); - if (pathEndPos < 0) { - this.rawPath = uri; - } else { - this.rawPath = uri.substring(0, pathEndPos); - RestUtils.decodeQueryString(uri, pathEndPos + 1, params); + int index = uri.indexOf('?'); + if (index >= 0) { + try { + RestUtils.decodeQueryString(uri, index + 1, params); + } catch (final IllegalArgumentException e) { + throw new BadParameterException(e); + } } - this.params = params; - this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); + return params; + } + + private static String path(final String uri) { + final int index = uri.indexOf('?'); + if (index >= 0) { + return uri.substring(0, index); + } else { + return uri; } } /** - * Creates a new RestRequest - * @param xContentRegistry the xContentRegistry to use when parsing XContent - * @param params the parameters of the request - * @param path the path of the request. This should not contain request parameters - * @param headers a map of the headers. This map should implement a Case-Insensitive hashing for keys as HTTP header names are case - * insensitive + * Creates a new REST request. In contrast to + * {@link RestRequest#RestRequest(NamedXContentRegistry, Map, String, Map)}, the path is not decoded so this constructor will not throw + * a {@link BadParameterException}. 
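+     * For example, test infrastructure that already carries a parameter map (such as the fake requests
+     * appearing later in this patch) can pass the raw path straight through and let callers decode it lazily.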
+ * + * @param xContentRegistry the content registry + * @param params the request parameters + * @param path the raw path (which is not parsed) + * @param headers a map of the header; this map should implement a case-insensitive lookup + * @throws ContentTypeHeaderException if the Content-Type header can not be parsed */ - public RestRequest(NamedXContentRegistry xContentRegistry, Map params, String path, Map> headers) { + public RestRequest( + final NamedXContentRegistry xContentRegistry, + final Map params, + final String path, + final Map> headers) { + final XContentType xContentType; + try { + xContentType = parseContentType(headers.get("Content-Type")); + } catch (final IllegalArgumentException e) { + throw new ContentTypeHeaderException(e); + } + if (xContentType != null) { + this.xContentType.set(xContentType); + } this.xContentRegistry = xContentRegistry; this.params = params; this.rawPath = path; this.headers = Collections.unmodifiableMap(headers); - final List contentType = getAllHeaderValues("Content-Type"); - final XContentType xContentType = parseContentType(contentType); - if (xContentType != null) { - this.xContentType.set(xContentType); - } } public enum Method { @@ -423,7 +443,7 @@ public final Tuple contentOrSourceParam() { * Parses the given content type string for the media type. This method currently ignores parameters. */ // TODO stop ignoring parameters such as charset... - private static XContentType parseContentType(List header) { + public static XContentType parseContentType(List header) { if (header == null || header.isEmpty()) { return null; } else if (header.size() > 1) { @@ -444,4 +464,20 @@ private static XContentType parseContentType(List header) { throw new IllegalArgumentException("empty Content-Type header"); } + public static class ContentTypeHeaderException extends RuntimeException { + + ContentTypeHeaderException(final IllegalArgumentException cause) { + super(cause); + } + + } + + public static class BadParameterException extends RuntimeException { + + BadParameterException(final IllegalArgumentException cause) { + super(cause); + } + + } + } diff --git a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java index 96106125f19ef..a0e6f7020302d 100644 --- a/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java @@ -165,27 +165,28 @@ public void testConvert() throws IOException { public void testResponseWhenPathContainsEncodingError() throws IOException { final String path = "%a"; - final RestRequest request = new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { - @Override - public Method method() { - return null; - } - - @Override - public String uri() { - return null; - } - - @Override - public boolean hasContent() { - return false; - } - - @Override - public BytesReference content() { - return null; - } - }; + final RestRequest request = + new RestRequest(NamedXContentRegistry.EMPTY, Collections.emptyMap(), path, Collections.emptyMap()) { + @Override + public Method method() { + return null; + } + + @Override + public String uri() { + return null; + } + + @Override + public boolean hasContent() { + return false; + } + + @Override + public BytesReference content() { + return null; + } + }; final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> RestUtils.decodeComponent(request.rawPath())); 
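        // For contrast, a minimal (assumed) example: a well-formed percent-escape decodes cleanly,
        // so only genuinely malformed paths exercise the error response built below.
        assertEquals("/foo", RestUtils.decodeComponent("%2Ffoo"));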
final RestChannel channel = new DetailedExceptionRestChannel(request); // if we try to decode the path, this will throw an IllegalArgumentException again diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index cb2d51f6a675e..f36638a43909f 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -367,9 +367,10 @@ public boolean supportsContentStream() { public void testDispatchWithContentStream() { final String mimeType = randomFrom("application/json", "application/smile"); String content = randomAlphaOfLengthBetween(1, BREAKER_LIMIT.bytesAsInt()); + final List contentTypeHeader = Collections.singletonList(mimeType); FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) - .withContent(new BytesArray(content), null).withPath("/foo") - .withHeaders(Collections.singletonMap("Content-Type", Collections.singletonList(mimeType))).build(); + .withContent(new BytesArray(content), RestRequest.parseContentType(contentTypeHeader)).withPath("/foo") + .withHeaders(Collections.singletonMap("Content-Type", contentTypeHeader)).build(); AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.OK); restController.registerHandler(RestRequest.Method.GET, "/foo", new RestHandler() { @Override diff --git a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java index d1c7d03e1b174..1b4bbff7322de 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestRequestTests.java @@ -38,6 +38,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; public class RestRequestTests extends ESTestCase { public void testContentParser() throws IOException { @@ -130,9 +132,15 @@ public void testPlainTextSupport() { public void testMalformedContentTypeHeader() { final String type = randomFrom("text", "text/:ain; charset=utf-8", "text/plain\";charset=utf-8", ":", "/", "t:/plain"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - Collections.singletonMap("Content-Type", Collections.singletonList(type)))); - assertEquals("invalid Content-Type header [" + type + "]", e.getMessage()); + final RestRequest.ContentTypeHeaderException e = expectThrows( + RestRequest.ContentTypeHeaderException.class, + () -> { + final Map> headers = Collections.singletonMap("Content-Type", Collections.singletonList(type)); + new ContentRestRequest("", Collections.emptyMap(), headers); + }); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header [" + type + "]")); } public void testNoContentTypeHeader() { @@ -142,9 +150,12 @@ public void testNoContentTypeHeader() { public void testMultipleContentTypeHeaders() { List headers = new ArrayList<>(randomUnique(() -> randomAlphaOfLengthBetween(1, 16), randomIntBetween(2, 10))); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new ContentRestRequest("", Collections.emptyMap(), - 
Collections.singletonMap("Content-Type", headers)));
-        assertEquals("only one Content-Type header should be provided", e.getMessage());
+        final RestRequest.ContentTypeHeaderException e = expectThrows(
+            RestRequest.ContentTypeHeaderException.class,
+            () -> new ContentRestRequest("", Collections.emptyMap(), Collections.singletonMap("Content-Type", headers)));
+        assertNotNull(e.getCause());
+        assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
+        assertThat(e.getMessage(), equalTo("java.lang.IllegalArgumentException: only one Content-Type header should be provided"));
     }

     public void testRequiredContent() {
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
index 83caf0293e0ab..d0403736400cd 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
@@ -40,8 +40,8 @@ public FakeRestRequest() {
         this(NamedXContentRegistry.EMPTY, new HashMap<>(), new HashMap<>(), null, Method.GET, "/", null);
     }

-    private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map<String, List<String>> headers, Map<String, String> params,
-                            BytesReference content, Method method, String path, SocketAddress remoteAddress) {
+    private FakeRestRequest(NamedXContentRegistry xContentRegistry, Map<String, List<String>> headers,
+                            Map<String, String> params, BytesReference content, Method method, String path, SocketAddress remoteAddress) {
         super(xContentRegistry, params, path, headers);
         this.content = content;
         this.method = method;

From a3e57735228d7b620dc74dcc476d20d713f781be Mon Sep 17 00:00:00 2001
From: Bolarinwa Saheed Olayemi
Date: Thu, 29 Mar 2018 00:18:42 +0200
Subject: [PATCH 11/68] Docs: Link to Ansible playbook for Elasticsearch
 (#29238)

Links to the official Ansible playbook for Elasticsearch.
---
 docs/plugins/integrations.asciidoc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc
index 162988fe3fc15..90f2c685fdaeb 100644
--- a/docs/plugins/integrations.asciidoc
+++ b/docs/plugins/integrations.asciidoc
@@ -82,6 +82,9 @@ releases 2.0 and later do not support rivers.
 [float]
 ==== Supported by Elasticsearch:

+* https://github.com/elastic/ansible-elasticsearch[Ansible playbook for Elasticsearch]:
+  An officially supported Ansible playbook for Elasticsearch. Tested with the latest versions of 5.x and 6.x on Ubuntu 14.04/16.04, Debian 8, CentOS 7.
+
 * https://github.com/elastic/puppet-elasticsearch[Puppet]:
   Elasticsearch puppet module.

From 9bc167466f0fd551c58b8e825454e2df2932fee8 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Wed, 28 Mar 2018 22:04:17 -0400
Subject: [PATCH 12/68] TEST: add log testDoNotRenewSyncedFlushWhenAllSealed

This test failed recently. This commit enables debug logging and prints
out the seals.
https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-unix-compatibility/os=oraclelinux/2234/console https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+6.x+intake/1437/console --- .../org/elasticsearch/indices/flush/FlushIT.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 934222f9e726a..a914eb435bb7d 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; @@ -244,6 +245,12 @@ private void indexDoc(Engine engine, String id) throws IOException { assertThat(indexResult.getFailure(), nullValue()); } + private String syncedFlushDescription(ShardsSyncedFlushResult result) { + return result.shardResponses().entrySet().stream() + .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") + .collect(Collectors.joining(",")); + } + public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -269,6 +276,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( @@ -284,6 +292,7 @@ public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } + @TestLogging("_root:DEBUG") public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -300,9 +309,11 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId); + logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); // Shards were updated, renew synced flush. 
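For context, a synced flush stamps a sync_id marker into each shard copy's Lucene commit user data, and a
seal is renewed only when the underlying commits have changed. A rough sketch of the reuse condition follows;
the helper name canReuseSeal is invented for illustration, assuming the CommitStats accessors this test
already uses (syncId(), getNumDocs()); the real logic lives in SyncedFlushService.

    import org.elasticsearch.index.engine.CommitStats;

    final class SealReuseSketch {
        // illustration only, not the production implementation: reuse an existing seal when
        // the copy still carries the same sync_id and the same number of docs, i.e. nothing
        // was indexed since the last seal
        static boolean canReuseSeal(CommitStats stats, String existingSyncId, long numDocsAtLastSeal) {
            return existingSyncId != null
                && existingSyncId.equals(stats.syncId())
                && stats.getNumDocs() == numDocsAtLastSeal;
        }
    }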
@@ -311,6 +322,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
             index("test", "doc", Integer.toString(i));
         }
         final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId);
+        logger.info("Third seal: {}", syncedFlushDescription(thirdSeal));
         assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId())));
         // Manually remove or change sync-id, renew synced flush.
@@ -326,6 +338,7 @@ public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
             assertThat(shard.commitStats().syncId(), nullValue());
         }
         final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), shardId);
+        logger.info("Fourth seal: {}", syncedFlushDescription(forthSeal));
         assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId())));
     }

From 21c985122b3db9120847cfd419d37383f82950a8 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Wed, 28 Mar 2018 20:21:30 -0700
Subject: [PATCH 13/68] Build: Fix repos.mavenLocal casing (#29289)

The sysprop repos.mavenLocal may be used to add the local .m2 maven
repository for testing snapshots of locally built dependencies.
Unfortunately this has to be checked in two different places (they
cannot be shared, due to buildSrc being built essentially as a separate
project), and the casing of the string sysprop lookups did not align.
This commit fixes BuildPlugin's checking of repos.mavenLocal to use the
correct casing (camelCase, to match the gradle dsl element).
---
 .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index d03591722a2fd..fcd6d6925598f 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -311,8 +311,8 @@ class BuildPlugin implements Plugin<Project> {
     /** Adds repositories used by ES dependencies */
     static void configureRepositories(Project project) {
         RepositoryHandler repos = project.repositories
-        if (System.getProperty("repos.mavenlocal") != null) {
-            // with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is
+        if (System.getProperty("repos.mavenLocal") != null) {
+            // with -Drepos.mavenLocal=true we can force checking the local .m2 repo which is
             // useful for development ie. bwc tests where we install stuff in the local repository
             // such that we don't have to pass hardcoded files to gradle
             repos.mavenLocal()

From 6578d8a2a83cf8f57bd858f28eb1ab76922291bd Mon Sep 17 00:00:00 2001
From: Uwe Schindler
Date: Thu, 29 Mar 2018 05:29:02 +0200
Subject: [PATCH 14/68] Update to forbiddenapis 2.5 (#29285)

---
 buildSrc/build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index 6bc461e1b598c..5256968b6ca3e 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -94,7 +94,7 @@ dependencies {
   compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
   compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
- compile 'de.thetaphi:forbiddenapis:2.4.1' + compile 'de.thetaphi:forbiddenapis:2.5' compile 'org.apache.rat:apache-rat:0.11' compile "org.elasticsearch:jna:4.5.1" } From b6568d0cfd1d1d9d8c58fadab8efb863672e9cd9 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Thu, 29 Mar 2018 09:16:53 +0200 Subject: [PATCH 15/68] Do not load global state when deleting a snapshot (#29278) When deleting a snapshot, it is not necessary to load and to parse the global metadata of the snapshot to delete. Now indices are stored in the snapshot metadata file, we have all the information to resolve the shards files to delete. This commit removes the readSnapshotMetaData() method that was used to load both global and index metadata files. Test coverage should be enough as SharedClusterSnapshotRestoreIT already contains several deletion tests. Related to #28934 --- .../blobstore/BlobStoreRepository.java | 91 ++++++------------- ...etadataLoadingDuringSnapshotRestoreIT.java | 6 ++ .../SharedClusterSnapshotRestoreIT.java | 64 ++++++++++++- 3 files changed, 97 insertions(+), 64 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index e4101bb9289b1..0f8e29d7f3835 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -341,27 +341,17 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository"); } + final RepositoryData repositoryData = getRepositoryData(); - List indices = Collections.emptyList(); SnapshotInfo snapshot = null; try { snapshot = getSnapshotInfo(snapshotId); - indices = snapshot.indices(); } catch (SnapshotMissingException ex) { throw ex; } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); } - MetaData metaData = null; - try { - if (snapshot != null) { - metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true); - } else { - metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true); - } - } catch (IOException | SnapshotException ex) { - logger.warn(() -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex); - } + try { // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); @@ -373,24 +363,29 @@ public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) { deleteGlobalMetaDataBlobIgnoringErrors(snapshot, snapshotId.getUUID()); // Now delete all indices - for (String index : indices) { - final IndexId indexId = repositoryData.resolveIndexId(index); - BlobPath indexPath = basePath().add("indices").add(indexId.getId()); - BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); - try { - indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); - } catch (IOException ex) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex); - } - if (metaData != null) { - IndexMetaData indexMetaData = 
metaData.index(index); + if (snapshot != null) { + final List indices = snapshot.indices(); + for (String index : indices) { + final IndexId indexId = repositoryData.resolveIndexId(index); + + IndexMetaData indexMetaData = null; + try { + indexMetaData = getSnapshotIndexMetaData(snapshotId, indexId); + } catch (ElasticsearchParseException | IOException ex) { + logger.warn(() -> + new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index), ex); + } + + deleteIndexMetaDataBlobIgnoringErrors(snapshot, indexId); + if (indexMetaData != null) { for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) { try { delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId)); } catch (SnapshotException ex) { final int finalShardId = shardId; - logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex); + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", + snapshotId, index, finalShardId), ex); } } } @@ -448,6 +443,16 @@ private void deleteGlobalMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotI } } + private void deleteIndexMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final IndexId indexId) { + final SnapshotId snapshotId = snapshotInfo.snapshotId(); + BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(basePath().add("indices").add(indexId.getId())); + try { + indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID()); + } catch (IOException ex) { + logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, indexId.getName()), ex); + } + } + /** * {@inheritDoc} */ @@ -508,44 +513,6 @@ public IndexMetaData getSnapshotIndexMetaData(final SnapshotId snapshotId, final return indexMetaDataFormat.read(blobStore().blobContainer(indexPath), snapshotId.getUUID()); } - /** - * Returns the global metadata associated with the snapshot. - *
- * The returned meta data contains global metadata as well as metadata - * for all indices listed in the indices parameter. - */ - private MetaData readSnapshotMetaData(final SnapshotId snapshotId, - final Version snapshotVersion, - final List indices, - final boolean ignoreErrors) throws IOException { - if (snapshotVersion == null) { - // When we delete corrupted snapshots we might not know which version we are dealing with - // We can try detecting the version based on the metadata file format - assert ignoreErrors; - if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) == false) { - throw new SnapshotMissingException(metadata.name(), snapshotId); - } - } - - final MetaData.Builder metaData = MetaData.builder(getSnapshotGlobalMetaData(snapshotId)); - if (indices != null) { - for (IndexId index : indices) { - try { - metaData.put(getSnapshotIndexMetaData(snapshotId, index), false); - } catch (ElasticsearchParseException | IOException ex) { - if (ignoreErrors == false) { - throw new SnapshotException(metadata.name(), snapshotId, - "[" + index.getName() + "] failed to read metadata for index", ex); - } else { - logger.warn(() -> - new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex); - } - } - } - } - return metaData.build(); - } - /** * Configures RateLimiter based on repository and global settings * diff --git a/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index bbc2a54b41baf..13b74df4e3d2b 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -139,6 +139,12 @@ public void testWhenMetadataAreLoaded() throws Exception { assertGlobalMetadataLoads("snap", 1); assertIndexMetadataLoads("snap", "docs", 4); assertIndexMetadataLoads("snap", "others", 3); + + // Deleting a snapshot does not load the global metadata state but loads each index metadata + assertAcked(client().admin().cluster().prepareDeleteSnapshot("repository", "snap").get()); + assertGlobalMetadataLoads("snap", 1); + assertIndexMetadataLoads("snap", "docs", 5); + assertIndexMetadataLoads("snap", "others", 4); } private void assertGlobalMetadataLoads(final String snapshot, final int times) { diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index d2656619bd58d..dbaf26c965749 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -1291,7 +1291,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - logger.info("--> delete index metadata and shard metadata"); + logger.info("--> delete global state metadata"); Path metadata = repo.resolve("meta-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat"); Files.delete(metadata); @@ -1341,6 +1341,67 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { 
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); } + /** Tests that a snapshot with a corrupted global state file can still be deleted */ + public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { + final Path repo = randomRepoPath(); + + assertAcked(client().admin().cluster().preparePutRepository("test-repo") + .setType("fs") + .setSettings(Settings.builder() + .put("location", repo) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); + + createIndex("test-idx-1", "test-idx-2"); + indexRandom(true, + client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"), + client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar")); + flushAndRefresh("test-idx-1", "test-idx-2"); + + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); + if (randomBoolean()) { + // Delete the global state metadata file + IOUtils.deleteFilesIgnoringExceptions(globalStatePath); + } else { + // Truncate the global state metadata file + try (SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) { + outChan.truncate(randomInt(10)); + } + } + + List snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo").get().getSnapshots(); + assertThat(snapshotInfos.size(), equalTo(1)); + assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap")); + + SnapshotsStatusResponse snapshotStatusResponse = + client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get(); + assertThat(snapshotStatusResponse.getSnapshots(), hasSize(1)); + assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); + + assertAcked(client().admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get()); + assertThrows(client().admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap"), + SnapshotMissingException.class); + assertThrows(client().admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), + SnapshotMissingException.class); + + createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap") + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + } + public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { Path repo = randomRepoPath(); logger.info("--> creating repository at {}", repo.toAbsolutePath()); @@ -2623,7 +2684,6 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { assertThat(snapshotInfo.successfulShards(), greaterThan(0)); assertThat(snapshotInfo.successfulShards(), 
equalTo(snapshotInfo.totalShards())); - // Truncate the global state metadata file final Path globalStatePath = repo.resolve("meta-" + snapshotInfo.snapshotId().getUUID() + ".dat"); try(SeekableByteChannel outChan = Files.newByteChannel(globalStatePath, StandardOpenOption.WRITE)) { outChan.truncate(randomInt(10)); From 04d0edc8ee88247bb4b148c4e7d6469df85fd4b4 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Thu, 29 Mar 2018 09:23:43 -0400 Subject: [PATCH 16/68] Fix incorrect geohash for lat 90, lon 180 (#29256) Due to special treatment for the 0xFFFFFF... value in GeoHashUtils' encodeLatLon method, the hashcode for lat 90, lon 180 is incorrectly encoded as `"000000000000"` instead of "zzzzzzzzzzzz". This commit removes the special treatment and fixes the issue. Closes #22163 --- .../org/elasticsearch/common/geo/GeoHashUtils.java | 6 +----- .../org/elasticsearch/common/geo/GeoHashTests.java | 14 +++++++++++++- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index cb31940a49c0d..acfb8970e684c 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -57,11 +57,7 @@ private GeoHashUtils() { * 31 bit encoding utils * *************************/ public static long encodeLatLon(final double lat, final double lon) { - long result = MortonEncoder.encode(lat, lon); - if (result == 0xFFFFFFFFFFFFFFFFL) { - return result & 0xC000000000000000L; - } - return result >>> 2; + return MortonEncoder.encode(lat, lon) >>> 2; } /** diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java index d2ae8401c5510..e4856fd01136b 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoHashTests.java @@ -25,7 +25,7 @@ * Tests for {@link org.elasticsearch.common.geo.GeoHashUtils} */ public class GeoHashTests extends ESTestCase { - public void testGeohashAsLongRoutines() { + public void testGeohashAsLongRoutines() { final GeoPoint expected = new GeoPoint(); final GeoPoint actual = new GeoPoint(); //Ensure that for all points at all supported levels of precision @@ -70,4 +70,16 @@ public void testBboxFromHash() { assertEquals(expectedLatDiff, bbox.maxLat - bbox.minLat, 0.00001); assertEquals(hash, GeoHashUtils.stringEncode(bbox.minLon, bbox.minLat, level)); } + + public void testGeohashExtremes() { + assertEquals("000000000000", GeoHashUtils.stringEncode(-180, -90)); + assertEquals("800000000000", GeoHashUtils.stringEncode(-180, 0)); + assertEquals("bpbpbpbpbpbp", GeoHashUtils.stringEncode(-180, 90)); + assertEquals("h00000000000", GeoHashUtils.stringEncode(0, -90)); + assertEquals("s00000000000", GeoHashUtils.stringEncode(0, 0)); + assertEquals("upbpbpbpbpbp", GeoHashUtils.stringEncode(0, 90)); + assertEquals("pbpbpbpbpbpb", GeoHashUtils.stringEncode(180, -90)); + assertEquals("xbpbpbpbpbpb", GeoHashUtils.stringEncode(180, 0)); + assertEquals("zzzzzzzzzzzz", GeoHashUtils.stringEncode(180, 90)); + } } From eb8b31746a0807604997105b701cbb9d552611dd Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 29 Mar 2018 19:35:57 +0200 Subject: [PATCH 17/68] Move trimming unsafe commits from engine ctor to store (#29260) As follow up to #28245 , this PR removes the logic for selecting the right start commit 
from the Engine constructor in favor of explicitly trimming them in the Store, before the engine is opened. This makes the constructor in engine follow standard Lucene semantics and use the last commit. Relates #28245 Relates #29156 --- docs/reference/indices/flush.asciidoc | 4 +- .../index/engine/CombinedDeletionPolicy.java | 47 ++-------- .../index/engine/InternalEngine.java | 53 +++-------- .../index/seqno/SequenceNumbers.java | 8 ++ .../elasticsearch/index/shard/IndexShard.java | 3 + .../org/elasticsearch/index/store/Store.java | 91 ++++++++++++++++++- .../index/translog/Translog.java | 23 ++++- .../engine/CombinedDeletionPolicyTests.java | 38 +------- .../index/engine/InternalEngineTests.java | 90 ++++++++++++------ 9 files changed, 207 insertions(+), 150 deletions(-) diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index 87b1e90a4d517..db1f7c2fe00a9 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -93,12 +93,12 @@ which returns something similar to: { "commit" : { "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", - "generation" : 4, + "generation" : 3, "user_data" : { "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", "local_checkpoint" : "-1", - "translog_generation" : "3", + "translog_generation" : "2", "max_seq_no" : "-1", "sync_id" : "AVvFY-071siAOuFGEO9P", <1> "max_unsafe_auto_id_timestamp" : "-1" diff --git a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java index 6f06c310e4cd5..d0575c8a8c977 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CombinedDeletionPolicy.java @@ -47,60 +47,27 @@ public final class CombinedDeletionPolicy extends IndexDeletionPolicy { private final Logger logger; private final TranslogDeletionPolicy translogDeletionPolicy; private final LongSupplier globalCheckpointSupplier; - private final IndexCommit startingCommit; private final ObjectIntHashMap snapshottedCommits; // Number of snapshots held against each commit point. private volatile IndexCommit safeCommit; // the most recent safe commit point - its max_seqno at most the persisted global checkpoint. 
private volatile IndexCommit lastCommit; // the most recent commit point - CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, - LongSupplier globalCheckpointSupplier, IndexCommit startingCommit) { + CombinedDeletionPolicy(Logger logger, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) { this.logger = logger; this.translogDeletionPolicy = translogDeletionPolicy; this.globalCheckpointSupplier = globalCheckpointSupplier; - this.startingCommit = startingCommit; this.snapshottedCommits = new ObjectIntHashMap<>(); } @Override public synchronized void onInit(List commits) throws IOException { assert commits.isEmpty() == false : "index is opened, but we have no commits"; - assert startingCommit != null && commits.contains(startingCommit) : "Starting commit not in the existing commit list; " - + "startingCommit [" + startingCommit + "], commit list [" + commits + "]"; - keepOnlyStartingCommitOnInit(commits); - updateTranslogDeletionPolicy(); - } - - /** - * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe - * at the recovering time but they can suddenly become safe in the future. - * The following issues can happen if unsafe commits are kept oninit. - *
- * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1) - * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2) - * is added without flushing, the global checkpoint is advanced to 2; and the replica recovers again, it will use - * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequenced-based recovery even the - * commit c2 contains a stale operation and the document(with seqno=2) will not be replicated to the replica. - *
- * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when are replica with a safe commit - * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2). - * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new - * commit on the replica will cause exception as the new last commit c3 will have recovery_translog_gen=1. The recovery - * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1 - * while the local checkpoint of c2 is 2. - *
- * 3. Commit without translog can be used in recovery. An old index, which was created before multiple-commits is introduced - * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, - * the policy can consider the snapshotted commit as a safe commit for recovery even the commit does not have translog. - */ - private void keepOnlyStartingCommitOnInit(List commits) throws IOException { - for (IndexCommit commit : commits) { - if (startingCommit.equals(commit) == false) { - this.deleteCommit(commit); - } + onCommit(commits); + if (safeCommit != commits.get(commits.size() - 1)) { + throw new IllegalStateException("Engine is opened, but the last commit isn't safe. Global checkpoint [" + + globalCheckpointSupplier.getAsLong() + "], seqNo is last commit [" + + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(lastCommit.getUserData().entrySet()) + "], " + + "seqNos in safe commit [" + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(safeCommit.getUserData().entrySet()) + "]"); } - assert startingCommit.isDeleted() == false : "Starting commit must not be deleted"; - lastCommit = startingCommit; - safeCommit = startingCommit; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 864385667f5fe..24d1fc16b702d 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -41,10 +41,8 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.InfoStream; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; @@ -59,6 +57,7 @@ import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -70,7 +69,6 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ElasticsearchMergePolicy; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogCorruptedException; @@ -78,8 +76,6 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; @@ -183,12 +179,10 @@ public InternalEngine(EngineConfig engineConfig) { translog = openTranslog(engineConfig, translogDeletionPolicy, engineConfig.getGlobalCheckpointSupplier()); assert translog.getGeneration() != null; this.translog = translog; - final IndexCommit startingCommit = getStartingCommitPoint(); - assert startingCommit != null : "Starting commit should be non-null"; - this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier, 
startingCommit); - this.combinedDeletionPolicy = new CombinedDeletionPolicy(logger, translogDeletionPolicy, - translog::getLastSyncedGlobalCheckpoint, startingCommit); - writer = createWriter(startingCommit); + this.localCheckpointTracker = createLocalCheckpointTracker(localCheckpointTrackerSupplier); + this.combinedDeletionPolicy = + new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint); + writer = createWriter(); bootstrapAppendOnlyInfoFromWriter(writer); historyUUID = loadOrGenerateHistoryUUID(writer); Objects.requireNonNull(historyUUID, "history uuid should not be null"); @@ -232,10 +226,11 @@ public InternalEngine(EngineConfig engineConfig) { } private LocalCheckpointTracker createLocalCheckpointTracker( - BiFunction localCheckpointTrackerSupplier, IndexCommit startingCommit) throws IOException { + BiFunction localCheckpointTrackerSupplier) throws IOException { final long maxSeqNo; final long localCheckpoint; - final SequenceNumbers.CommitInfo seqNoStats = Store.loadSeqNoInfo(startingCommit); + final SequenceNumbers.CommitInfo seqNoStats = + SequenceNumbers.loadSeqNoInfoFromLuceneCommit(store.readLastCommittedSegmentsInfo().userData.entrySet()); maxSeqNo = seqNoStats.maxSeqNo; localCheckpoint = seqNoStats.localCheckpoint; logger.trace("recovered maximum sequence number [{}] and local checkpoint [{}]", maxSeqNo, localCheckpoint); @@ -395,31 +390,6 @@ public void skipTranslogRecovery() { pendingTranslogRecovery.set(false); // we are good - now we can commit } - private IndexCommit getStartingCommitPoint() throws IOException { - final IndexCommit startingIndexCommit; - final long lastSyncedGlobalCheckpoint = translog.getLastSyncedGlobalCheckpoint(); - final long minRetainedTranslogGen = translog.getMinFileGeneration(); - final List existingCommits = DirectoryReader.listCommits(store.directory()); - // We may not have a safe commit if an index was create before v6.2; and if there is a snapshotted commit whose translog - // are not retained but max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. - // To avoid this issue, we only select index commits whose translog are fully retained. - if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_2_0)) { - final List recoverableCommits = new ArrayList<>(); - for (IndexCommit commit : existingCommits) { - if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { - recoverableCommits.add(commit); - } - } - assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + - "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; - startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); - } else { - // TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint. 
- startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); - } - return startingIndexCommit; - } - private void recoverFromTranslogInternal() throws IOException { Translog.TranslogGeneration translogGeneration = translog.getGeneration(); final int opsRecovered; @@ -1907,9 +1877,9 @@ private long loadCurrentVersionFromIndex(Term uid) throws IOException { } } - private IndexWriter createWriter(IndexCommit startingCommit) throws IOException { + private IndexWriter createWriter() throws IOException { try { - final IndexWriterConfig iwc = getIndexWriterConfig(startingCommit); + final IndexWriterConfig iwc = getIndexWriterConfig(); return createWriter(store.directory(), iwc); } catch (LockObtainFailedException ex) { logger.warn("could not lock IndexWriter", ex); @@ -1922,11 +1892,10 @@ IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOEx return new IndexWriter(directory, iwc); } - private IndexWriterConfig getIndexWriterConfig(IndexCommit startingCommit) { + private IndexWriterConfig getIndexWriterConfig() { final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); iwc.setCommitOnClose(false); // we by default don't commit on close iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); - iwc.setIndexCommit(startingCommit); iwc.setIndexDeletionPolicy(combinedDeletionPolicy); // with tests.verbose, lucene sets this up: plumb to align with filesystem stream boolean verbose = false; diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index b30743c2cff93..0c071f4b2d422 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -122,5 +122,13 @@ public CommitInfo(long maxSeqNo, long localCheckpoint) { this.maxSeqNo = maxSeqNo; this.localCheckpoint = localCheckpoint; } + + @Override + public String toString() { + return "CommitInfo{" + + "maxSeqNo=" + maxSeqNo + + ", localCheckpoint=" + localCheckpoint + + '}'; + } } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 0ab2cc699d355..e2e8459943c26 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1317,6 +1317,9 @@ private void innerOpenEngineAndTranslog() throws IOException { assertMaxUnsafeAutoIdInCommit(); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + createNewEngine(config); verifyNotClosed(); // We set active because we are now writing operations to the engine; this way, if we go idle after some time and become inactive, diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 297790890c1b0..83fded4a1f18b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -24,6 +24,7 @@ import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.DirectoryReader; 
import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.index.IndexFormatTooNewException; @@ -75,6 +76,7 @@ import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.CombinedDeletionPolicy; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -1463,7 +1465,7 @@ private static long estimateSize(Directory directory) throws IOException { */ public void createEmpty() throws IOException { metadataLock.writeLock().lock(); - try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory)) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory, null)) { final Map map = new HashMap<>(); map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()); map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED)); @@ -1482,7 +1484,7 @@ public void createEmpty() throws IOException { */ public void bootstrapNewHistory() throws IOException { metadataLock.writeLock().lock(); - try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { final Map userData = getUserData(writer); final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO)); final Map map = new HashMap<>(); @@ -1501,7 +1503,7 @@ public void bootstrapNewHistory() throws IOException { */ public void associateIndexWithNewTranslog(final String translogUUID) throws IOException { metadataLock.writeLock().lock(); - try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) { throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]"); } @@ -1520,7 +1522,7 @@ public void associateIndexWithNewTranslog(final String translogUUID) throws IOEx */ public void ensureIndexHasHistoryUUID() throws IOException { metadataLock.writeLock().lock(); - try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, null)) { final Map userData = getUserData(writer); if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) { updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID())); @@ -1530,6 +1532,82 @@ public void ensureIndexHasHistoryUUID() throws IOException { } } + /** + * Keeping existing unsafe commits when opening an engine can be problematic because these commits are not safe + * at the recovering time but they can suddenly become safe in the future. + * The following issues can happen if unsafe commits are kept oninit. + *
+     * 1. Replica can use unsafe commit in peer-recovery. This happens when a replica with a safe commit c1(max_seqno=1)
+     * and an unsafe commit c2(max_seqno=2) recovers from a primary with c1(max_seqno=1). If a new document(seqno=2)
+     * is added without flushing, the global checkpoint is advanced to 2; if the replica then recovers again, it will use
+     * the unsafe commit c2(max_seqno=2 at most gcp=2) as the starting commit for sequence-based recovery even though the
+     * commit c2 contains a stale operation, and the document(with seqno=2) will not be replicated to the replica.
+     * <p>
+     * 2. Min translog gen for recovery can go backwards in peer-recovery. This happens when a replica has a safe commit
+     * c1(local_checkpoint=1, recovery_translog_gen=1) and an unsafe commit c2(local_checkpoint=2, recovery_translog_gen=2).
+     * The replica recovers from a primary, and keeps c2 as the last commit, then sets last_translog_gen to 2. Flushing a new
+     * commit on the replica will then cause an exception, as the new last commit c3 will have recovery_translog_gen=1. The recovery
+     * translog generation of a commit is calculated based on the current local checkpoint. The local checkpoint of c3 is 1
+     * while the local checkpoint of c2 is 2.
+     * <p>

+ * 3. Commit without translog can be used in recovery. An old index, which was created before multiple commits were introduced + * (v6.2), may not have a safe commit. If that index has a snapshotted commit without translog and an unsafe commit, + * the policy can consider the snapshotted commit as a safe commit for recovery even though the commit does not have a translog. + */ + public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long minRetainedTranslogGen, + final org.elasticsearch.Version indexVersionCreated) throws IOException { + metadataLock.writeLock().lock(); + try { + final List<IndexCommit> existingCommits = DirectoryReader.listCommits(directory); + if (existingCommits.isEmpty()) { + throw new IllegalArgumentException("No index found to trim"); + } + final String translogUUID = existingCommits.get(existingCommits.size() - 1).getUserData().get(Translog.TRANSLOG_UUID_KEY); + final IndexCommit startingIndexCommit; + // We may not have a safe commit if an index was created before v6.2; and if there is a snapshotted commit whose translog + // is not retained but whose max_seqno is at most the global checkpoint, we may mistakenly select it as a starting commit. + // To avoid this issue, we only select index commits whose translog is fully retained. + if (indexVersionCreated.before(org.elasticsearch.Version.V_6_2_0)) { + final List<IndexCommit> recoverableCommits = new ArrayList<>(); + for (IndexCommit commit : existingCommits) { + if (minRetainedTranslogGen <= Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))) { + recoverableCommits.add(commit); + } + } + assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " + + "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]"; + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint); + } else { + // TODO: Assert that the starting commit is a safe commit once peer-recovery sets the global checkpoint. + startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(existingCommits, lastSyncedGlobalCheckpoint); + } + + if (translogUUID.equals(startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY)) == false) { + throw new IllegalStateException("starting commit translog uuid [" + + startingIndexCommit.getUserData().get(Translog.TRANSLOG_UUID_KEY) + "] is not equal to last commit's translog uuid [" + + translogUUID + "]"); + } + if (startingIndexCommit.equals(existingCommits.get(existingCommits.size() - 1)) == false) { + try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory, startingIndexCommit)) { + // this achieves two things: + // - by committing a new commit based on the starting commit, it makes sure the starting commit will be opened + // - deletes any other commit (by Lucene's standard deletion policy) + // + // note that we can't just use IndexCommit.delete() as we really want to make sure that those files won't be used + // even if a virus scanner prevents the files from being deleted. + + // The new commit will use segment files from the starting commit but userData from the last commit by default. + // Thus, we need to manually set the userData from the starting commit to the new commit.
+ writer.setLiveCommitData(startingIndexCommit.getUserData().entrySet()); + writer.commit(); + } + } + } finally { + metadataLock.writeLock().unlock(); + } + } + + private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException { final Map<String, String> userData = getUserData(writer); userData.putAll(keysToUpdate); @@ -1543,9 +1621,12 @@ private Map<String, String> getUserData(IndexWriter writer) { return userData; } - private IndexWriter newIndexWriter(IndexWriterConfig.OpenMode openMode, final Directory dir) throws IOException { + private static IndexWriter newIndexWriter(final IndexWriterConfig.OpenMode openMode, final Directory dir, final IndexCommit commit) + throws IOException { + assert openMode == IndexWriterConfig.OpenMode.APPEND || commit == null : "can't specify create flag with a commit"; IndexWriterConfig iwc = new IndexWriterConfig(null) .setCommitOnClose(false) + .setIndexCommit(commit) // we don't want merges to happen here - we call maybe merge on the engine // later once we started it up otherwise we would need to wait for it here // we also don't specify a codec here and merges should use the engines for this index diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 62e47d08ded54..b6b6f656be44f 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -22,7 +22,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.common.UUIDs; @@ -38,6 +37,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; @@ -1705,6 +1705,11 @@ static Checkpoint readCheckpoint(final Path location) throws IOException { * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid */ public static long readGlobalCheckpoint(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.globalCheckpoint; + } + + private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException { final Checkpoint checkpoint = readCheckpoint(location); // We need to open at least one translog reader to validate the translogUUID. final Path translogFile = location.resolve(getFilename(checkpoint.generation)); @@ -1715,7 +1720,21 @@ public static long readGlobalCheckpoint(final Path location, final String expect } catch (Exception ex) { throw new TranslogCorruptedException("Translog at [" + location + "] is corrupted", ex); } - return checkpoint.globalCheckpoint; + return checkpoint; + } + + /** + * Returns the minimum translog generation retained by the translog at the given location. + * This method also verifies that the translogUUID of the translog at that location matches the provided translogUUID.
+ * + * @param location the location of the translog + * @return the minimum translog generation + * @throws IOException if an I/O exception occurred reading the checkpoint + * @throws TranslogCorruptedException if the translog is corrupted or mismatched with the given uuid + */ + public static long readMinTranslogGeneration(final Path location, final String expectedTranslogUUID) throws IOException { + final Checkpoint checkpoint = readCheckpoint(location, expectedTranslogUUID); + return checkpoint.minTranslogGeneration; } /** diff --git a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index 67fd385955f3e..ea7de50b7b34c 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -52,7 +52,7 @@ public class CombinedDeletionPolicyTests extends ESTestCase { public void testKeepCommitsAfterGlobalCheckpoint() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final LongArrayList maxSeqNoList = new LongArrayList(); final LongArrayList translogGenList = new LongArrayList(); @@ -91,7 +91,7 @@ public void testAcquireIndexCommit() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(); final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long lastMaxSeqNo = between(1, 1000); long lastTranslogGen = between(1, 20); int safeIndex = 0; @@ -161,7 +161,7 @@ public void testLegacyIndex() throws Exception { final UUID translogUUID = UUID.randomUUID(); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); long legacyTranslogGen = randomNonNegativeLong(); IndexCommit legacyCommit = mockLegacyIndexCommit(translogUUID, legacyTranslogGen); @@ -194,7 +194,7 @@ public void testLegacyIndex() throws Exception { public void testDeleteInvalidCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final int invalidCommits = between(1, 10); final List commitList = new ArrayList<>(); @@ -217,39 +217,11 @@ public void testDeleteInvalidCommits() throws Exception { } } - /** - * Keeping existing unsafe commits can be problematic because these commits are not safe at the recovering time - * but they can suddenly become safe in the future. 
See {@link CombinedDeletionPolicy#keepOnlyStartingCommitOnInit(List)} - */ - public void testKeepOnlyStartingCommitOnInit() throws Exception { - final AtomicLong globalCheckpoint = new AtomicLong(randomNonNegativeLong()); - TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - final UUID translogUUID = UUID.randomUUID(); - final List commitList = new ArrayList<>(); - int totalCommits = between(2, 20); - for (int i = 0; i < totalCommits; i++) { - commitList.add(mockIndexCommit(randomNonNegativeLong(), translogUUID, randomNonNegativeLong())); - } - final IndexCommit startingCommit = randomFrom(commitList); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, startingCommit); - indexPolicy.onInit(commitList); - for (IndexCommit commit : commitList) { - if (commit.equals(startingCommit) == false) { - verify(commit, times(1)).delete(); - } - } - verify(startingCommit, never()).delete(); - assertThat(translogPolicy.getMinTranslogGenerationForRecovery(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(translogPolicy.getTranslogGenerationOfLastCommit(), - equalTo(Long.parseLong(startingCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - } - public void testCheckUnreferencedCommits() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO); final UUID translogUUID = UUID.randomUUID(); final TranslogDeletionPolicy translogPolicy = createTranslogDeletionPolicy(); - CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get, null); + CombinedDeletionPolicy indexPolicy = new CombinedDeletionPolicy(logger, translogPolicy, globalCheckpoint::get); final List commitList = new ArrayList<>(); int totalCommits = between(2, 20); long lastMaxSeqNo = between(1, 1000); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index bba05401d4155..3a7fd94f61905 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -763,6 +763,7 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { } } initialEngine.close(); + trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()); recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { @@ -1168,6 +1169,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine.index(indexForDoc(doc)); EngineConfig config = engine.config(); engine.close(); + trimUnsafeCommits(config); engine = new InternalEngine(config); engine.recoverFromTranslog(); assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); @@ -3581,7 +3583,7 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro } finally { IOUtils.close(initialEngine); } - + trimUnsafeCommits(initialEngine.config()); try (Engine recoveringEngine = new InternalEngine(initialEngine.config())) { recoveringEngine.recoverFromTranslog(); recoveringEngine.fillSeqNoGaps(2); @@ -3933,6 +3935,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { // now do it again to make sure we 
preserve values etc. try { + trimUnsafeCommits(replicaEngine.config()); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); if (flushed) { assertThat(recoveringEngine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); @@ -4256,31 +4259,6 @@ public void testAcquireIndexCommit() throws Exception { } } - public void testOpenIndexAndTranslogKeepOnlySafeCommit() throws Exception { - IOUtils.close(engine); - final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); - final EngineConfig config = copy(engine.config(), globalCheckpoint::get); - final IndexCommit safeCommit; - try (InternalEngine engine = createEngine(config)) { - final int numDocs = between(5, 50); - for (int i = 0; i < numDocs; i++) { - index(engine, i); - if (randomBoolean()) { - engine.flush(); - } - } - // Selects a starting commit and advances and persists the global checkpoint to that commit. - final List<IndexCommit> commits = DirectoryReader.listCommits(engine.store.directory()); - safeCommit = randomFrom(commits); - globalCheckpoint.set(Long.parseLong(safeCommit.getUserData().get(SequenceNumbers.MAX_SEQ_NO))); - engine.getTranslog().sync(); - } - try (InternalEngine engine = new InternalEngine(config)) { - final List<IndexCommit> existingCommits = DirectoryReader.listCommits(engine.store.directory()); - assertThat("safe commit should be kept", existingCommits, contains(safeCommit)); - } - } - public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { IOUtils.close(engine, store); store = createStore(); @@ -4615,4 +4593,64 @@ public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception { false, randomNonNegativeLong(), localCheckpointTracker.generateSeqNo())); assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); } + + public void testTrimUnsafeCommits() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + final int maxSeqNo = 40; + final List<Long> seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList()); + Collections.shuffle(seqNos, random()); + try (Store store = createStore()) { + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + final List<Long> commitMaxSeqNo = new ArrayList<>(); + final long minTranslogGen; + try (InternalEngine engine = createEngine(config)) { + for (int i = 0; i < seqNos.size(); i++) { + ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null); + Engine.Index index = new Engine.Index(newUid(doc), doc, seqNos.get(i), 0, + 1, VersionType.EXTERNAL, REPLICA, System.nanoTime(), -1, false); + engine.index(index); + if (randomBoolean()) { + engine.flush(); + final Long maxSeqNoInCommit = seqNos.subList(0, i + 1).stream().max(Long::compareTo).orElse(-1L); + commitMaxSeqNo.add(maxSeqNoInCommit); + } + } + globalCheckpoint.set(randomInt(maxSeqNo)); + engine.syncTranslog(); + minTranslogGen = engine.getTranslog().getMinFileGeneration(); + } + + store.trimUnsafeCommits(globalCheckpoint.get(), minTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + long safeMaxSeqNo = + commitMaxSeqNo.stream().filter(s -> s <= globalCheckpoint.get()) + .reduce((s1, s2) -> s2) // get the last one.
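+ // (the reduce keeps the last element, i.e. the max seq_no recorded for the newest commit at or below the global checkpoint)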
.orElse(SequenceNumbers.NO_OPS_PERFORMED); + final List<IndexCommit> commits = DirectoryReader.listCommits(store.directory()); + assertThat(commits, hasSize(1)); + assertThat(commits.get(0).getUserData().get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(safeMaxSeqNo))); + try (IndexReader reader = DirectoryReader.open(commits.get(0))) { + for (LeafReaderContext context : reader.leaves()) { + final NumericDocValues values = context.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + if (values != null) { + for (int docID = 0; docID < context.reader().maxDoc(); docID++) { + if (values.advanceExact(docID) == false) { + throw new AssertionError("Document does not have a seq number: " + docID); + } + assertThat(values.longValue(), lessThanOrEqualTo(globalCheckpoint.get())); + } + } + } + } + } + } + + private static void trimUnsafeCommits(EngineConfig config) throws IOException { + final Store store = config.getStore(); + final TranslogConfig translogConfig = config.getTranslogConfig(); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); + final long minRetainedTranslogGen = Translog.readMinTranslogGeneration(translogConfig.getTranslogPath(), translogUUID); + store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated()); + } + } From 04dd7387825c116c74953a9cfb149e79b1191084 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Thu, 29 Mar 2018 14:16:18 -0400 Subject: [PATCH 18/68] TEST: trim unsafe commits before opening engine Since #29260, unsafe commits must be trimmed before opening an engine. This makes the engine constructor follow Lucene standard semantics and use the last commit. However, we haven't fully applied this change in some tests.
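A sketch of the pattern these tests now follow before reopening an engine (trimUnsafeCommits is the test-local helper added above, which reads the last commit's translog UUID, global checkpoint and minimum retained translog generation, then delegates to Store#trimUnsafeCommits):

    engine.close();
    trimUnsafeCommits(engine.config());           // discard commits that are not safe to recover from
    engine = new InternalEngine(engine.config()); // the engine now opens on the last (safe) commit
    engine.recoverFromTranslog();
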
Relates #29260 --- .../index/engine/InternalEngineTests.java | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 3a7fd94f61905..71abfac3ebb32 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -644,6 +644,7 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { InternalEngine engine = createEngine(store, translog); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); assertTrue(engine.isRecovering()); engine.recoverFromTranslog(); @@ -659,6 +660,7 @@ public void testFlushIsDisabledDuringTranslogRecovery() throws IOException { engine.index(indexForDoc(doc)); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); expectThrows(IllegalStateException.class, () -> engine.flush(true, true)); assertTrue(engine.isRecovering()); @@ -690,18 +692,14 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { } finally { IOUtils.close(engine); } - - Engine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(engine.config()); + trimUnsafeCommits(engine.config()); + try (Engine recoveringEngine = new InternalEngine(engine.config())){ recoveringEngine.recoverFromTranslog(); try (Engine.Searcher searcher = recoveringEngine.acquireSearcher("test")) { final TotalHitCountCollector collector = new TotalHitCountCollector(); searcher.searcher().search(new MatchAllDocsQuery(), collector); assertThat(collector.getTotalHits(), equalTo(operations.get(operations.size() - 1) instanceof Engine.Delete ? 
0 : 1)); } - } finally { - IOUtils.close(recoveringEngine); } } @@ -722,6 +720,7 @@ public void testTranslogRecoveryDoesNotReplayIntoTranslog() throws IOException { Engine recoveringEngine = null; try { final AtomicBoolean committed = new AtomicBoolean(); + trimUnsafeCommits(initialEngine.config()); recoveringEngine = new InternalEngine(initialEngine.config()) { @Override @@ -1151,6 +1150,7 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { SequenceNumbers.UNASSIGNED_SEQ_NO, shardId); store.associateIndexWithNewTranslog(translogUUID); } + trimUnsafeCommits(config); engine = new InternalEngine(config); engine.recoverFromTranslog(); assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); @@ -2054,9 +2054,8 @@ public void testSeqNoAndCheckpoints() throws IOException { IOUtils.close(initialEngine); } - InternalEngine recoveringEngine = null; - try { - recoveringEngine = new InternalEngine(initialEngine.config()); + trimUnsafeCommits(initialEngine.engineConfig); + try (InternalEngine recoveringEngine = new InternalEngine(initialEngine.config())){ recoveringEngine.recoverFromTranslog(); assertEquals(primarySeqNo, recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo()); @@ -2075,8 +2074,6 @@ public void testSeqNoAndCheckpoints() throws IOException { assertThat(recoveringEngine.getLocalCheckpointTracker().getCheckpoint(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().getMaxSeqNo(), equalTo(primarySeqNo)); assertThat(recoveringEngine.getLocalCheckpointTracker().generateSeqNo(), equalTo(primarySeqNo + 1)); - } finally { - IOUtils.close(recoveringEngine); } } @@ -2389,6 +2386,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { // open and recover tlog { for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { assertTrue(engine.isRecovering()); Map userData = engine.getLastCommittedSegmentInfos().getUserData(); @@ -2413,6 +2411,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); store.associateIndexWithNewTranslog(translogUUID); + trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2426,6 +2425,7 @@ public void testCurrentTranslogIDisCommitted() throws IOException { // open and recover tlog with empty tlog { for (int i = 0; i < 2; i++) { + trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { Map userData = engine.getLastCommittedSegmentInfos().getUserData(); assertEquals("1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); @@ -2487,6 +2487,7 @@ public void testTranslogReplayWithFailure() throws IOException { boolean started = false; InternalEngine engine = null; try { + trimUnsafeCommits(config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null)); engine = createEngine(store, translogPath); started = true; } catch (EngineException | IOException e) { @@ -2567,6 +2568,7 @@ public void testSkipTranslogReplay() throws IOException { } assertVisibleCount(engine, numDocs); engine.close(); + trimUnsafeCommits(engine.config()); engine = new InternalEngine(engine.config()); engine.skipTranslogRecovery(); try (Engine.Searcher searcher = 
engine.acquireSearcher("test")) { @@ -2608,6 +2610,7 @@ public void testTranslogReplay() throws IOException { parser.mappingUpdate = dynamicUpdate(); engine.close(); + trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work engine.recoverFromTranslog(); @@ -3685,6 +3688,7 @@ public void testNoOps() throws IOException { final BiFunction supplier = (ms, lcp) -> new LocalCheckpointTracker( maxSeqNo, localCheckpoint); + trimUnsafeCommits(engine.config()); noOpEngine = new InternalEngine(engine.config(), supplier) { @Override protected long doGenerateSeqNoForOperation(Operation operation) { @@ -3832,6 +3836,7 @@ public void markSeqNoAsCompleted(long seqNo) { completedSeqNos.add(seqNo); } }; + trimUnsafeCommits(engine.config()); actualEngine = new InternalEngine(engine.config(), supplier); final int operations = randomIntBetween(0, 1024); final Set expectedCompletedSeqNos = new HashSet<>(); @@ -3902,6 +3907,7 @@ public void testFillUpSequenceIdGapsOnRecovery() throws IOException { assertEquals(docs - 1, engine.getLocalCheckpointTracker().getCheckpoint()); assertEquals(maxSeqIDOnReplica, replicaEngine.getLocalCheckpointTracker().getMaxSeqNo()); assertEquals(checkpointOnReplica, replicaEngine.getLocalCheckpointTracker().getCheckpoint()); + trimUnsafeCommits(copy(replicaEngine.config(), globalCheckpoint::get)); recoveringEngine = new InternalEngine(copy(replicaEngine.config(), globalCheckpoint::get)); assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().stats().getUncommittedOperations()); recoveringEngine.recoverFromTranslog(); From a75a7d22b223bc054971cb6b7d2d6879140f445e Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 29 Mar 2018 11:59:52 -0700 Subject: [PATCH 19/68] Build: Use branch specific refspec sysprop for bwc builds (#29299) This commit changes the sysprop for overriding the branch bwc builds use to be branch specific. There are 3 different bwc branches built, but all of them currently read the exact same sysprop. For example, with this change and current branches, you can now specify eg `-Dtests.bwc.refspec.6.x=my_6x` and it will build only next-minor-snapshot with that branch, while next-bugfix-snapshot will continue to use 5.6. --- TESTING.asciidoc | 8 ++++---- distribution/bwc/build.gradle | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index bfdca2926026f..97902d56ec8c7 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -476,12 +476,12 @@ branch. Finally, on a release branch, it will test against the most recent relea === BWC Testing against a specific remote/branch Sometimes a backward compatibility change spans two versions. A common case is a new functionality -that needs a BWC bridge in and an unreleased versioned of a release branch (for example, 5.x). +that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x). To test the changes, you can instruct Gradle to build the BWC version from a another remote/branch combination instead of -pulling the release branch from GitHub. You do so using the `tests.bwc.remote` and `tests.bwc.refspec` system properties: +pulling the release branch from GitHub. 
You do so using the `tests.bwc.remote` and `tests.bwc.refspec.BRANCH` system properties: ------------------------------------------------- -./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x +./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x ------------------------------------------------- The branch needs to be available on the remote that the BWC makes of the @@ -496,7 +496,7 @@ will need to: will contain your change. . Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. . Push both branches to your remote repository. -. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`. +. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`. == Test coverage analysis diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 840c69742a0c7..8d5aa204c487d 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -91,7 +91,7 @@ subprojects { String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" task checkoutBwcBranch(type: LoggedExec) { - String refspec = System.getProperty("tests.bwc.refspec", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) + String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) dependsOn fetchLatest workingDir = checkoutDir commandLine = ['git', 'checkout', refspec] From 8967dbf4c60b0a3d19f999268894de1612cf487c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 29 Mar 2018 18:33:35 -0400 Subject: [PATCH 20/68] Increase timeout on Netty client latch for tests We use a latch when sending requests during tests so that we do not hang forever waiting for replies on those requests. This commit increases the timeout on that latch to 30 seconds because sometimes 10 seconds is just not enough. --- .../java/org/elasticsearch/http/netty4/Netty4HttpClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java index 7c4471e249102..9719d15778b53 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpClient.java @@ -144,7 +144,7 @@ private synchronized Collection sendRequests( for (HttpRequest request : requests) { channelFuture.channel().writeAndFlush(request); } - latch.await(10, TimeUnit.SECONDS); + latch.await(30, TimeUnit.SECONDS); } finally { if (channelFuture != null) { From 5518640d46e6b3d8c8e1b93413c4312bbfdbd631 Mon Sep 17 00:00:00 2001 From: Sue Gallagher <36747279+Sue-Gallagher@users.noreply.github.com> Date: Thu, 29 Mar 2018 15:50:05 -0700 Subject: [PATCH 21/68] [DOCS] Added info on WGS-84. 
Closes issue #3590 (#29305) --- docs/reference/mapping/types/geo-shape.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 26974f1f867de..7251361845af5 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -220,7 +220,7 @@ to Elasticsearch types: |======================================================================= |GeoJSON Type |WKT Type |Elasticsearch Type |Description -|`Point` |`POINT` |`point` |A single geographic coordinate. +|`Point` |`POINT` |`point` |A single geographic coordinate. Note: Elasticsearch uses WGS-84 coordinates only. |`LineString` |`LINESTRING` |`linestring` |An arbitrary line given two or more points. |`Polygon` |`POLYGON` |`polygon` |A _closed_ polygon whose first and last point must match, thus requiring `n + 1` vertices to create an `n`-sided From 54f8f819ef3807aa6ab9958ff15957c926bcaec4 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Thu, 29 Mar 2018 22:10:03 -0700 Subject: [PATCH 22/68] Search: Validate script query is run with a single script (#29304) The parsing code for script query currently silently skips any tokens it does not know about within its parsing loop. The only token it does not catch is an array, which means passing multiple scripts in via an array will cause the last script to be parsed and used, silently dropping the others. This commit adds validation that arrays are not seen while parsing. --- .../index/query/ScriptQueryBuilder.java | 6 +++++ .../index/query/ScriptQueryBuilderTests.java | 22 ++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 9cae2f3e061da..3bb7113d215e3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -111,6 +111,12 @@ public static ScriptQueryBuilder fromXContent(XContentParser parser) throws IOEx } else { throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]"); } + } else { + if (token != XContentParser.Token.START_ARRAY) { + throw new AssertionError("Impossible token received: " + token.name()); + } + throw new ParsingException(parser.getTokenLocation(), + "[script] query does not support an array of scripts. 
Use a bool query with a clause per script instead."); } } diff --git a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java index acde2e65e1fd7..0252468e717dc 100644 --- a/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/ScriptQueryBuilderTests.java @@ -20,7 +20,7 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Query; -import org.elasticsearch.index.query.ScriptQueryBuilder.ScriptQuery; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.instanceOf; public class ScriptQueryBuilderTests extends AbstractQueryTestCase { @@ -89,6 +90,25 @@ public void testFromJson() throws IOException { assertEquals(json, "5", parsed.script().getIdOrCode()); } + public void testArrayOfScriptsException() { + String json = + "{\n" + + " \"script\" : {\n" + + " \"script\" : [ {\n" + + " \"source\" : \"5\",\n" + + " \"lang\" : \"mockscript\"\n" + + " },\n" + + " {\n" + + " \"source\" : \"6\",\n" + + " \"lang\" : \"mockscript\"\n" + + " }\n ]" + + " }\n" + + "}"; + + ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); + assertThat(e.getMessage(), containsString("does not support an array of scripts")); + } + @Override protected Set getObjectsHoldingArbitraryContent() { //script_score.script.params can contain arbitrary parameters. no error is expected when From b67b5b1bbd1ec4c8246cca3f5155145d61c734e9 Mon Sep 17 00:00:00 2001 From: olcbean <26058559+olcbean@users.noreply.github.com> Date: Fri, 30 Mar 2018 10:53:29 +0200 Subject: [PATCH 23/68] REST high-level client: add support for Indices Update Settings API (#28892) Relates to #27205 --- .../resources/checkstyle_suppressions.xml | 1 - .../elasticsearch/client/IndicesClient.java | 26 ++++ .../org/elasticsearch/client/Request.java | 24 ++- .../elasticsearch/client/IndicesClientIT.java | 99 ++++++++++++ .../elasticsearch/client/RequestTests.java | 28 ++++ .../IndicesClientDocumentationIT.java | 110 ++++++++++++++ .../high-level/cluster/put_settings.asciidoc | 2 +- .../high-level/indices/put_settings.asciidoc | 142 ++++++++++++++++++ .../high-level/search/search.asciidoc | 2 +- .../high-level/supported-apis.asciidoc | 2 + .../api/indices.put_settings.json | 12 +- .../settings/put/UpdateSettingsRequest.java | 80 +++++++++- .../settings/put/UpdateSettingsResponse.java | 14 ++ .../support/master/AcknowledgedRequest.java | 19 ++- .../support/master/MasterNodeRequest.java | 19 +++ .../indices/RestUpdateSettingsAction.java | 16 +- .../UpdateSettingsRequestStreamableTests.java | 112 ++++++++++++++ .../put/UpdateSettingsRequestTests.java | 87 +++++++++++ .../put/UpdateSettingsResponseTests.java | 46 ++++++ .../test/rest/ESRestTestCase.java | 16 ++ 20 files changed, 831 insertions(+), 26 deletions(-) create mode 100644 docs/java-rest/high-level/indices/put_settings.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java create mode 100644 
server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b1ef76c9d6a0e..58df6cd7503e9 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -151,7 +151,6 @@ - diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index f5b46a6a53192..ff9c612e1d475 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -45,6 +45,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -406,4 +408,28 @@ public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener + * See Update Indices Settings + * API on elastic.co + */ + public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings, + UpdateSettingsResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously updates specific index level settings using the Update Indices Settings API + *

+ * See Update Indices Settings + * API on elastic.co + */ + public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener listener, + Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings, + UpdateSettingsResponse::fromXContent, listener, emptySet(), headers); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 802b1492be092..7b8574258c706 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -43,6 +43,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -598,7 +599,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { } static Request indicesExist(GetIndexRequest request) { - //this can be called with no indices as argument by transport client, not via REST though + // this can be called with no indices as argument by transport client, not via REST though if (request.indices() == null || request.indices().length == 0) { throw new IllegalArgumentException("indices are mandatory"); } @@ -612,6 +613,20 @@ static Request indicesExist(GetIndexRequest request) { return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); } + static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException { + Params parameters = Params.builder(); + parameters.withTimeout(updateSettingsRequest.timeout()); + parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); + parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); + parameters.withFlatSettings(updateSettingsRequest.flatSettings()); + parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); + + String[] indices = updateSettingsRequest.indices() == null ? 
Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); + String endpoint = endpoint(indices, "_settings"); + HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE); + return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); + } + private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); @@ -833,6 +848,13 @@ Params withIncludeDefaults(boolean includeDefaults) { return this; } + Params withPreserveExisting(boolean preserveExisting) { + if (preserveExisting) { + return putParam("preserve_existing", Boolean.TRUE.toString()); + } + return this; + } + Map getParams() { return Collections.unmodifiableMap(params); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 7a29a35d20ab1..0feb78d66b2dd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -49,6 +49,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -56,6 +58,8 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -63,6 +67,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -72,6 +77,7 @@ import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -609,4 +615,97 @@ public void testRollover() throws IOException { assertEquals("test_new", rolloverResponse.getNewIndex()); } } + + public void testIndexPutSettings() throws IOException { + + final Setting dynamicSetting = IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; + final String dynamicSettingKey = IndexMetaData.SETTING_NUMBER_OF_REPLICAS; + final int dynamicSettingValue = 0; + + final Setting staticSetting = IndexSettings.INDEX_CHECK_ON_STARTUP; + final String staticSettingKey = IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(); + final String staticSettingValue = 
"true"; + + final Setting unmodifiableSetting = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; + final String unmodifiableSettingKey = IndexMetaData.SETTING_NUMBER_OF_SHARDS; + final int unmodifiableSettingValue = 3; + + String index = "index"; + createIndex(index, Settings.EMPTY); + + assertThat(dynamicSetting.getDefault(Settings.EMPTY), not(dynamicSettingValue)); + UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); + dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); + UpdateSettingsResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync); + + assertTrue(response.isAcknowledged()); + Map indexSettingsAsMap = getIndexSettingsAsMap(index); + assertThat(indexSettingsAsMap.get(dynamicSettingKey), equalTo(String.valueOf(dynamicSettingValue))); + + assertThat(staticSetting.getDefault(Settings.EMPTY), not(staticSettingValue)); + UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); + staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), + startsWith("Elasticsearch exception [type=illegal_argument_exception, " + + "reason=Can't update non dynamic settings [[index.shard.check_on_startup]] for open indices [[index/")); + + indexSettingsAsMap = getIndexSettingsAsMap(index); + assertNull(indexSettingsAsMap.get(staticSettingKey)); + + closeIndex(index); + response = execute(staticSettingRequest, highLevelClient().indices()::putSettings, + highLevelClient().indices()::putSettingsAsync); + assertTrue(response.isAcknowledged()); + openIndex(index); + indexSettingsAsMap = getIndexSettingsAsMap(index); + assertThat(indexSettingsAsMap.get(staticSettingKey), equalTo(staticSettingValue)); + + assertThat(unmodifiableSetting.getDefault(Settings.EMPTY), not(unmodifiableSettingValue)); + UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); + unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); + exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), startsWith( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=Can't update non dynamic settings [[index.number_of_shards]] for open indices [[index/")); + closeIndex(index); + exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.getMessage(), startsWith( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=final index setting [index.number_of_shards], not updateable")); + } + + @SuppressWarnings("unchecked") + private Map getIndexSettingsAsMap(String index) throws IOException { + Map indexSettings = getIndexSettings(index); + return (Map)((Map) indexSettings.get(index)).get("settings"); + } + + public void testIndexPutSettingNonExistent() throws IOException { + + String index = "index"; + UpdateSettingsRequest indexUpdateSettingsRequest = 
new UpdateSettingsRequest(index); + String setting = "no_idea_what_you_are_talking_about"; + int value = 10; + indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build()); + + ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertEquals(RestStatus.NOT_FOUND, exception.status()); + assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); + + createIndex(index, Settings.EMPTY); + exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, + highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); + assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); + assertThat(exception.getMessage(), equalTo( + "Elasticsearch exception [type=illegal_argument_exception, " + + "reason=unknown setting [index.no_idea_what_you_are_talking_about] please check that any required plugins are installed, " + + "or check the breaking changes documentation for removed settings]")); + } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 75ac543fbb4ce..920fcd8cdb06b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -1363,6 +1364,33 @@ public void testRollover() throws IOException { assertEquals(expectedParams, request.getParameters()); } + public void testIndexPutSettings() throws IOException { + String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 2); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); + Map expectedParams = new HashMap<>(); + setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams); + setRandomMasterTimeout(updateSettingsRequest, expectedParams); + setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); + setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams); + if (randomBoolean()) { + updateSettingsRequest.setPreserveExisting(randomBoolean()); + if (updateSettingsRequest.isPreserveExisting()) { + expectedParams.put("preserve_existing", "true"); + } + } + + Request request = Request.indexPutSettings(updateSettingsRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + if (indices != null && indices.length > 0) { + endpoint.add(String.join(",", indices)); + } + endpoint.add("_settings"); + assertThat(endpoint.toString(), equalTo(request.getEndpoint())); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertToXContentBody(updateSettingsRequest, request.getEntity()); + assertEquals(expectedParams, request.getParameters()); + } + private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index bc6946eb2dc7f..e33d1e4729b0e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -48,6 +48,8 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -56,6 +58,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -394,6 +397,7 @@ public void testCreateIndexAsync() throws Exception { // tag::create-index-execute-listener ActionListener listener = new ActionListener() { + @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -1378,4 +1382,110 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testIndexPutSettings() throws Exception { + RestHighLevelClient client = highLevelClient(); + + { + 
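+ // test fixture: create an index for the update-settings snippets below to run against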
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); + assertTrue(createIndexResponse.isAcknowledged()); + } + + // tag::put-settings-request + UpdateSettingsRequest request = new UpdateSettingsRequest("index1"); // <1> + UpdateSettingsRequest requestMultiple = + new UpdateSettingsRequest("index1", "index2"); // <2> + UpdateSettingsRequest requestAll = new UpdateSettingsRequest(); // <3> + // end::put-settings-request + + // tag::put-settings-create-settings + String settingKey = "index.number_of_replicas"; + int settingValue = 0; + Settings settings = + Settings.builder() + .put(settingKey, settingValue) + .build(); // <1> + // end::put-settings-create-settings + // tag::put-settings-request-index-settings + request.settings(settings); + // end::put-settings-request-index-settings + + { + // tag::put-settings-settings-builder + Settings.Builder settingsBuilder = + Settings.builder() + .put(settingKey, settingValue); + request.settings(settingsBuilder); // <1> + // end::put-settings-settings-builder + } + { + // tag::put-settings-settings-map + Map map = new HashMap<>(); + map.put(settingKey, settingValue); + request.settings(map); // <1> + // end::put-settings-settings-map + } + { + // tag::put-settings-settings-source + request.settings( + "{\"index.number_of_replicas\": \"2\"}" + , XContentType.JSON); // <1> + // end::put-settings-settings-source + } + + // tag::put-settings-request-flat-settings + request.flatSettings(true); // <1> + // end::put-settings-request-flat-settings + // tag::put-settings-request-preserveExisting + request.setPreserveExisting(false); // <1> + // end::put-settings-request-preserveExisting + // tag::put-settings-request-timeout + request.timeout(TimeValue.timeValueMinutes(2)); // <1> + request.timeout("2m"); // <2> + // end::put-settings-request-timeout + // tag::put-settings-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::put-settings-request-masterTimeout + // tag::put-settings-request-indicesOptions + request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> + // end::put-settings-request-indicesOptions + + // tag::put-settings-execute + UpdateSettingsResponse updateSettingsResponse = + client.indices().putSettings(request); + // end::put-settings-execute + + // tag::put-settings-response + boolean acknowledged = updateSettingsResponse.isAcknowledged(); // <1> + // end::put-settings-response + assertTrue(acknowledged); + + // tag::put-settings-execute-listener + ActionListener listener = + new ActionListener() { + + @Override + public void onResponse(UpdateSettingsResponse updateSettingsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-settings-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-settings-execute-async + client.indices().putSettingsAsync(request,listener); // <1> + // end::put-settings-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } diff --git a/docs/java-rest/high-level/cluster/put_settings.asciidoc b/docs/java-rest/high-level/cluster/put_settings.asciidoc index 2d9f55c1e9419..74b479faa0501 100644 --- a/docs/java-rest/high-level/cluster/put_settings.asciidoc +++ b/docs/java-rest/high-level/cluster/put_settings.asciidoc @@ -58,7 +58,7 @@ The following 
arguments can optionally be provided:
--------------------------------------------------
include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings]
--------------------------------------------------
-<1> Wether the updated settings returned in the `ClusterUpdateSettings` should
+<1> Whether the updated settings returned in the `ClusterUpdateSettings` should
be in a flat format

["source","java",subs="attributes,callouts,macros"]
diff --git a/docs/java-rest/high-level/indices/put_settings.asciidoc b/docs/java-rest/high-level/indices/put_settings.asciidoc
new file mode 100644
index 0000000000000..49312da82a400
--- /dev/null
+++ b/docs/java-rest/high-level/indices/put_settings.asciidoc
@@ -0,0 +1,142 @@
+[[java-rest-high-indices-put-settings]]
+=== Update Indices Settings API
+
+The Update Indices Settings API allows changing specific index-level settings.
+
+[[java-rest-high-indices-put-settings-request]]
+==== Update Indices Settings Request
+
+An `UpdateSettingsRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request]
+--------------------------------------------------
+<1> Update settings for one index
+<2> Update settings for multiple indices
+<3> Update settings for all indices
+
+==== Indices Settings
+At least one setting to be updated must be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings]
+--------------------------------------------------
+<1> Sets the index settings to be applied
+
+==== Providing the Settings
+The settings to be applied can be provided in different ways:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings]
+--------------------------------------------------
+<1> Creates a setting as `Settings`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-builder]
+--------------------------------------------------
+<1> Settings provided as `Settings.Builder`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-source]
+--------------------------------------------------
+<1> Settings provided as `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-map]
+--------------------------------------------------
+<1> Settings provided as a `Map`
+
+==== Optional Arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings]
+--------------------------------------------------
+<1> Whether the updated settings returned in the `UpdateSettings` should
+be in a flat format
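+
+For illustration, the request, settings, and flat-format pieces above combine
+into the following self-contained sketch (the index name and replica count are
+illustrative, not taken from the tagged test sources):
+
+["source","java"]
+--------------------------------------------------
+UpdateSettingsRequest request = new UpdateSettingsRequest("index1");
+request.settings(Settings.builder()
+        .put("index.number_of_replicas", 2)  // the setting to update
+        .build());
+request.flatSettings(true);                  // optional flat_settings flag
+--------------------------------------------------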
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting]
+--------------------------------------------------
+<1> Whether to update existing settings. If set to `true`, existing settings
+on an index remain unchanged; the default is `false`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-timeout]
+--------------------------------------------------
+<1> Timeout to wait for all the nodes to acknowledge the new setting
+as a `TimeValue`
+<2> Timeout to wait for all the nodes to acknowledge the new setting
+as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-masterTimeout]
+--------------------------------------------------
+<1> Timeout to connect to the master node as a `TimeValue`
+<2> Timeout to connect to the master node as a `String`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-indicesOptions]
+--------------------------------------------------
+<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
+how wildcard expressions are expanded
+
+[[java-rest-high-indices-put-settings-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute]
+--------------------------------------------------
+
+[[java-rest-high-indices-put-settings-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of an update index settings request requires both the
+`UpdateSettingsRequest` instance and an `ActionListener` instance to be
+passed to the asynchronous method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-async]
+--------------------------------------------------
+<1> The `UpdateSettingsRequest` to execute and the `ActionListener`
+to use when the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed, the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `UpdateSettingsResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of a failure. The raised exception is provided as an argument
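+
+The listener in the tagged snippet above has empty callbacks; a filled-in
+version might look like this sketch (the handling shown is illustrative only):
+
+["source","java"]
+--------------------------------------------------
+ActionListener<UpdateSettingsResponse> listener =
+        new ActionListener<UpdateSettingsResponse>() {
+            @Override
+            public void onResponse(UpdateSettingsResponse updateSettingsResponse) {
+                // the response is provided on success
+                boolean acknowledged = updateSettingsResponse.isAcknowledged();
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                // the raised exception is provided on failure
+                e.printStackTrace();
+            }
+        };
+--------------------------------------------------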
+
+[[java-rest-high-indices-put-settings-response]]
+==== Update Indices Settings Response
+
+The returned `UpdateSettingsResponse` allows retrieving information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-response]
+--------------------------------------------------
+<1> Indicates whether all of the nodes have acknowledged the request
\ No newline at end of file
diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc
index af81775a90072..3e9472ff2cb58 100644
--- a/docs/java-rest/high-level/search/search.asciidoc
+++ b/docs/java-rest/high-level/search/search.asciidoc
@@ -275,7 +275,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener]
The `SearchResponse` that is returned by executing the search provides details
about the search execution itself as well as access to the documents returned. First,
there is useful information about the request execution itself, like the
-HTTP status code, execution time or wether the request terminated early or timed
+HTTP status code, execution time or whether the request terminated early or timed
out:

["source","java",subs="attributes,callouts,macros"]
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 0330b1903c5bf..29052171cddc6 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -64,6 +64,7 @@ Index Management::
* <>
* <>
* <>
+* <>

Mapping Management::
* <>
@@ -87,6 +88,7 @@ include::indices/rollover.asciidoc[]
include::indices/put_mapping.asciidoc[]
include::indices/update_aliases.asciidoc[]
include::indices/exists_alias.asciidoc[]
+include::indices/put_settings.asciidoc[]

== Cluster APIs
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
index 7c9cf627530ef..3055cb8e32e2e 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json
@@ -16,6 +16,10 @@
        "type": "time",
        "description": "Specify timeout for connection to master"
      },
+      "timeout": {
+        "type" : "time",
+        "description" : "Explicit operation timeout"
+      },
      "preserve_existing": {
        "type": "boolean",
        "description": "Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`"
@@ -34,10 +38,10 @@
        "default": "open",
        "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
}, - "flat_settings": { - "type": "boolean", - "description": "Return settings in flat format (default: false)" - } + "flat_settings": { + "type": "boolean", + "description": "Return settings in flat format (default: false)" + } } }, "body": { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 686bf8a74b85d..197e0db2d32ca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -28,27 +28,34 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Request for an update index settings action */ -public class UpdateSettingsRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { +public class UpdateSettingsRequest extends AcknowledgedRequest + implements IndicesRequest.Replaceable, ToXContentObject { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); private Settings settings = EMPTY_SETTINGS; private boolean preserveExisting = false; + private boolean flatSettings = false; public UpdateSettingsRequest() { } @@ -68,6 +75,29 @@ public UpdateSettingsRequest(Settings settings, String... indices) { this.settings = settings; } + /** + * Sets the value of "flat_settings". + * Used only by the high-level REST client. + * + * @param flatSettings + * value of "flat_settings" flag to be set + * @return this request + */ + public UpdateSettingsRequest flatSettings(boolean flatSettings) { + this.flatSettings = flatSettings; + return this; + } + + /** + * Return settings in flat format. + * Used only by the high-level REST client. + * + * @return true if settings need to be returned in flat format; false otherwise. 
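+     *
+     * For example, {@code new UpdateSettingsRequest("index").flatSettings(true)} asks for
+     * the updated settings in the response to be rendered in flat format, i.e. keys like
+     * {@code index.number_of_replicas} rather than nested objects.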
+ */ + public boolean flatSettings() { + return flatSettings; + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -178,4 +208,50 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(preserveExisting); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + settings.toXContent(builder, params); + builder.endObject(); + return builder; + } + + public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOException { + Map settings = new HashMap<>(); + Map bodySettings = parser.map(); + Object innerBodySettings = bodySettings.get("settings"); + // clean up in case the body is wrapped with "settings" : { ... } + if (innerBodySettings instanceof Map) { + @SuppressWarnings("unchecked") + Map innerBodySettingsMap = (Map) innerBodySettings; + settings.putAll(innerBodySettingsMap); + } else { + settings.putAll(bodySettings); + } + return this.settings(settings); + } + + @Override + public String toString() { + return "indices : " + Arrays.toString(indices) + "," + Strings.toString(this); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + UpdateSettingsRequest that = (UpdateSettingsRequest) o; + return Objects.equals(settings, that.settings) + && Objects.equals(indicesOptions, that.indicesOptions) + && Objects.equals(preserveExisting, that.preserveExisting) + && Arrays.equals(indices, that.indices); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java index b1475843aac5f..79116eb8cf5a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +32,13 @@ */ public class UpdateSettingsResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "update_index_settings", true, args -> new UpdateSettingsResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + UpdateSettingsResponse() { } @@ -48,4 +57,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } + + public static UpdateSettingsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 615aaec487538..d0ca3a0246a03 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.util.Objects; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -31,7 +32,8 @@ * Abstract class that allows to mark action requests that support acknowledgements. * Facilitates consistency across different api. */ -public abstract class AcknowledgedRequest> extends MasterNodeRequest implements AckedRequest { +public abstract class AcknowledgedRequest> extends MasterNodeRequest + implements AckedRequest { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); @@ -86,4 +88,19 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); timeout.writeTo(out); } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + AcknowledgedRequest that = (AcknowledgedRequest) o; + return Objects.equals(timeout, that.timeout); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), timeout); + } + } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 2bad309f1cc3b..314cbfd111573 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; +import java.util.Objects; /** * A based request for master based operation. @@ -76,4 +77,22 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); masterNodeTimeout = new TimeValue(in); } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MasterNodeRequest that = (MasterNodeRequest) o; + return Objects.equals(masterNodeTimeout, that.masterNodeTimeout); + } + + @Override + public int hashCode() { + return Objects.hash(masterNodeTimeout); + } + } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 93090ba25eee6..68f696b180267 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -57,21 +57,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); - - Map settings = new HashMap<>(); - try (XContentParser parser = request.contentParser()) { - Map bodySettings = parser.map(); - Object innerBodySettings = bodySettings.get("settings"); - // clean up in case the body is wrapped with "settings" : { ... 
} - if (innerBodySettings instanceof Map) { - @SuppressWarnings("unchecked") - Map innerBodySettingsMap = (Map) innerBodySettings; - settings.putAll(innerBodySettingsMap); - } else { - settings.putAll(bodySettings); - } - } - updateSettingsRequest.settings(settings); + updateSettingsRequest.fromXContent(request.contentParser()); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java new file mode 100644 index 0000000000000..7b1029129b0ed --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.test.AbstractStreamableTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.StringJoiner; + +public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTestCase { + + @Override + protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { + if (randomBoolean()) { + return new UpdateSettingsRequest(mutateSettings(request.settings()), request.indices()); + } + return new UpdateSettingsRequest(request.settings(), mutateIndices(request.indices())); + } + + @Override + protected UpdateSettingsRequest createTestInstance() { + return createTestItem(); + } + + @Override + protected UpdateSettingsRequest createBlankInstance() { + return new UpdateSettingsRequest(); + } + + public static UpdateSettingsRequest createTestItem() { + UpdateSettingsRequest request = randomBoolean() + ? 
new UpdateSettingsRequest(randomSettings(0, 2)) + : new UpdateSettingsRequest(randomSettings(0, 2), randomIndicesNames(0, 2)); + request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + request.setPreserveExisting(randomBoolean()); + request.flatSettings(randomBoolean()); + return request; + } + + private static Settings mutateSettings(Settings settings) { + if (settings.isEmpty()) { + return randomSettings(1, 5); + } + Set allKeys = settings.keySet(); + List keysToBeModified = randomSubsetOf(randomIntBetween(1, allKeys.size()), allKeys); + Builder builder = Settings.builder(); + for (String key : allKeys) { + String value = settings.get(key); + if (keysToBeModified.contains(key)) { + value += randomAlphaOfLengthBetween(2, 5); + } + builder.put(key, value); + } + return builder.build(); + } + + private static String[] mutateIndices(String[] indices) { + if (CollectionUtils.isEmpty(indices)) { + return randomIndicesNames(1, 5); + } + String[] mutated = Arrays.copyOf(indices, indices.length); + Arrays.asList(mutated).replaceAll(i -> i += randomAlphaOfLengthBetween(2, 5)); + return mutated; + } + + private static Settings randomSettings(int min, int max) { + int num = randomIntBetween(min, max); + Builder builder = Settings.builder(); + for (int i = 0; i < num; i++) { + int keyDepth = randomIntBetween(1, 5); + StringJoiner keyJoiner = new StringJoiner(".", "", ""); + for (int d = 0; d < keyDepth; d++) { + keyJoiner.add(randomAlphaOfLengthBetween(3, 5)); + } + builder.put(keyJoiner.toString(), randomAlphaOfLengthBetween(2, 5)); + } + return builder.build(); + } + + private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { + int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); + String[] indices = new String[numIndices]; + for (int i = 0; i < numIndices; i++) { + indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); + } + return indices; + } +} \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java new file mode 100644 index 0000000000000..ff75dbecd520c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.indices.settings.put;
+
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+import java.util.function.Predicate;
+
+public class UpdateSettingsRequestTests extends AbstractXContentTestCase {
+
+    private final boolean enclosedSettings = randomBoolean();
+
+    @Override
+    protected UpdateSettingsRequest createTestInstance() {
+        UpdateSettingsRequest testRequest = UpdateSettingsRequestStreamableTests.createTestItem();
+        if (enclosedSettings) {
+            UpdateSettingsRequest requestWithEnclosingSettings = new UpdateSettingsRequest(testRequest.settings()) {
+                public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+                    builder.startObject();
+                    builder.startObject("settings");
+                    this.settings().toXContent(builder, params);
+                    builder.endObject();
+                    builder.endObject();
+                    return builder;
+                }
+            };
+            return requestWithEnclosingSettings;
+        }
+        return testRequest;
+    }
+
+    @Override
+    protected UpdateSettingsRequest doParseInstance(XContentParser parser) throws IOException {
+        return new UpdateSettingsRequest().fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        // if the settings are enclosed as a "settings" object
+        // then all other top-level elements will be ignored during the parsing
+        return enclosedSettings;
+    }
+
+    @Override
+    protected Predicate getRandomFieldsExcludeFilter() {
+        if (enclosedSettings) {
+            return field -> field.startsWith("settings");
+        }
+        return field -> true;
+    }
+
+    @Override
+    protected void assertEqualInstances(UpdateSettingsRequest expectedInstance, UpdateSettingsRequest newInstance) {
+        // here only the settings should be tested, as this test covers explicitly only the XContent parsing
+        // the rest of the request fields are tested by the StreamableTests
+        super.assertEqualInstances(new UpdateSettingsRequest(expectedInstance.settings()),
+                new UpdateSettingsRequest(newInstance.settings()));
+    }
+
+    @Override
+    protected boolean assertToXContentEquivalence() {
+        // if enclosedSettings is used, disable the XContentEquivalence check as the
+        // parsed.toXContent is not equivalent to the test instance
+        return !enclosedSettings;
+    }
+
+}
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java
new file mode 100644
index 0000000000000..a3fb484f02e88
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.settings.put; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractStreamableXContentTestCase; + +public class UpdateSettingsResponseTests extends AbstractStreamableXContentTestCase { + + @Override + protected UpdateSettingsResponse doParseInstance(XContentParser parser) { + return UpdateSettingsResponse.fromXContent(parser); + } + + @Override + protected UpdateSettingsResponse createTestInstance() { + return new UpdateSettingsResponse(randomBoolean()); + } + + @Override + protected UpdateSettingsResponse createBlankInstance() { + return new UpdateSettingsResponse(); + } + + @Override + protected UpdateSettingsResponse mutateInstance(UpdateSettingsResponse response) { + return new UpdateSettingsResponse(response.isAcknowledged() == false); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index befc21eb1f697..90a1d2c7f1df2 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -32,6 +32,7 @@ import org.apache.http.ssl.SSLContexts; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -491,6 +492,16 @@ private static void updateIndexSettings(String index, Settings settings) throws new StringEntity(Strings.toString(settings), ContentType.APPLICATION_JSON))); } + protected static Map getIndexSettings(String index) throws IOException { + Map params = new HashMap<>(); + params.put("flat_settings", "true"); + Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_settings", params); + assertOK(response); + try (InputStream is = response.getEntity().getContent()) { + return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + } + protected static boolean indexExists(String index) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, index); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); @@ -501,6 +512,11 @@ protected static void closeIndex(String index) throws IOException { assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } + protected static void openIndex(String index) throws IOException { + Response response = client().performRequest(HttpPost.METHOD_NAME, index + "/_open"); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + } + protected static boolean aliasExists(String alias) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, "/_alias/" + alias); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); From bcc9cbfba7558327fe429b1c0f58a5221c259cce Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 30 Mar 2018 10:58:40 +0200 Subject: [PATCH 24/68] Resolve unchecked cast warnings introduced with #28892 --- .../action/support/master/AcknowledgedRequest.java | 2 +- 
.../elasticsearch/action/support/master/MasterNodeRequest.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
index d0ca3a0246a03..0bc58675fce58 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java
@@ -92,7 +92,7 @@ public void writeTo(StreamOutput out) throws IOException {
    @Override
    public boolean equals(Object o) {
        if (super.equals(o)) {
-            AcknowledgedRequest that = (AcknowledgedRequest) o;
+            AcknowledgedRequest<?> that = (AcknowledgedRequest<?>) o;
            return Objects.equals(timeout, that.timeout);
        }
        return false;
diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
index 314cbfd111573..545c2490a2b3c 100644
--- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java
@@ -86,7 +86,7 @@ public boolean equals(Object o) {
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
-        MasterNodeRequest that = (MasterNodeRequest) o;
+        MasterNodeRequest<?> that = (MasterNodeRequest<?>) o;
        return Objects.equals(masterNodeTimeout, that.masterNodeTimeout);
    }

From 437ad06e40d0c02de9bd59802e8e63645ab3e32e Mon Sep 17 00:00:00 2001
From: Fabien Baligand
Date: Fri, 30 Mar 2018 15:10:14 +0200
Subject: [PATCH 25/68] fix query string example for boolean query (#28881)

---
 docs/reference/query-dsl/query-string-query.asciidoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc
index ecefee7757548..17d07e25259f5 100644
--- a/docs/reference/query-dsl/query-string-query.asciidoc
+++ b/docs/reference/query-dsl/query-string-query.asciidoc
@@ -312,7 +312,7 @@ GET /_search
The example above creates a boolean query:
-`(ny OR (new AND york)) city)`
+`(ny OR (new AND york)) city`
that matches documents with the term `ny` or the conjunction `new AND york`.
By default the parameter `auto_generate_synonyms_phrase_query` is set to `true`.

From 199d131385b81fc919962cba70b7b706811d135f Mon Sep 17 00:00:00 2001
From: Fabien Baligand
Date: Fri, 30 Mar 2018 16:36:40 +0200
Subject: [PATCH 26/68] Improve query string docs (#28882)

fix query string syntax doc when OR operator is missed

---
 docs/reference/query-dsl/query-string-syntax.asciidoc | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc
index c73543c99a1d9..54bae2588e1f6 100644
--- a/docs/reference/query-dsl/query-string-syntax.asciidoc
+++ b/docs/reference/query-dsl/query-string-syntax.asciidoc
@@ -23,11 +23,9 @@ search terms, but it is possible to specify other fields in the query syntax:
    status:active

-* where the `title` field contains `quick` or `brown`.
- If you omit the OR operator the default operator will be used +* where the `title` field contains `quick` or `brown` title:(quick OR brown) - title:(quick brown) * where the `author` field contains the exact phrase `"john smith"` @@ -36,7 +34,7 @@ search terms, but it is possible to specify other fields in the query syntax: * where any of the fields `book.title`, `book.content` or `book.date` contains `quick` or `brown` (note how we need to escape the `*` with a backslash): - book.\*:(quick brown) + book.\*:(quick OR brown) * where the field `title` has any non-null value: From b7e6fb9ac5c8d0bb771e319c8968ddd89f502720 Mon Sep 17 00:00:00 2001 From: Andy Bristol Date: Fri, 30 Mar 2018 14:09:26 -0700 Subject: [PATCH 27/68] [test] remove Streamable serde assertions (#29307) Removes a set of assertions in the test framework that verified that Streamable objects could be serialized and deserialized across different versions. When this was discussed the consensus was that this approach has not caught many bugs in a long time and that serialization testing of objects was best left to their respective unit and integration tests. This commit also removes a transport interceptor that was used in ESIntegTestCase tests to make these assertions about objects coming in or off the wire. --- .../ExceptionSerializationTests.java | 6 - .../search/query/SearchQueryIT.java | 9 +- .../suggest/CompletionSuggestSearchIT.java | 13 +- .../elasticsearch/test/ESIntegTestCase.java | 4 - .../hamcrest/ElasticsearchAssertions.java | 186 +----------------- .../AssertingTransportInterceptor.java | 130 ------------ .../ElasticsearchAssertionsTests.java | 27 --- 7 files changed, 11 insertions(+), 364 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 0b99b311add8a..1f62eb706a84b 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -81,7 +81,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.ActionTransportException; import org.elasticsearch.transport.ConnectTransportException; @@ -116,7 +115,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; @@ -233,7 +231,6 @@ private T serialize(T exception) throws IOException { } private T serialize(T exception, Version version) throws IOException { - ElasticsearchAssertions.assertVersionSerializable(version, exception); BytesStreamOutput out = new BytesStreamOutput(); out.setVersion(version); out.writeException(exception); @@ -578,9 +575,6 @@ public void testWriteThrowable() throws IOException { } assertArrayEquals(deserialized.getStackTrace(), ex.getStackTrace()); assertTrue(deserialized.getStackTrace().length > 1); - assertVersionSerializable(VersionUtils.randomVersion(random()), cause); - 
assertVersionSerializable(VersionUtils.randomVersion(random()), ex); - assertVersionSerializable(VersionUtils.randomVersion(random()), deserialized); } } diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index c3f1da82c7984..b2a7c045ddce9 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -94,7 +94,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; @@ -191,7 +190,7 @@ public void testConstantScoreQuery() throws Exception { SearchResponse searchResponse = client().prepareSearch().setQuery(constantScoreQuery(matchQuery("field1", "quick"))).get(); assertHitCount(searchResponse, 2L); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test").setQuery( @@ -210,7 +209,7 @@ public void testConstantScoreQuery() throws Exception { assertHitCount(searchResponse, 2L); assertFirstHit(searchResponse, hasScore(searchResponse.getHits().getAt(1).getScore())); for (SearchHit searchHit : searchResponse.getHits().getHits()) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } int num = scaledRandomIntBetween(100, 200); @@ -228,7 +227,7 @@ public void testConstantScoreQuery() throws Exception { long totalHits = searchResponse.getHits().getTotalHits(); SearchHits hits = searchResponse.getHits(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(1.0f)); + assertThat(searchHit, hasScore(1.0f)); } searchResponse = client().prepareSearch("test_1").setQuery( boolQuery().must(matchAllQuery()).must( @@ -238,7 +237,7 @@ public void testConstantScoreQuery() throws Exception { if (totalHits > 1) { float expected = hits.getAt(0).getScore(); for (SearchHit searchHit : hits) { - assertSearchHit(searchHit, hasScore(expected)); + assertThat(searchHit, hasScore(expected)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index deae6bf1a7ef7..0717e1be2121e 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -69,7 +69,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasScore; import static org.hamcrest.Matchers.contains; @@ -245,8 +244,8 @@ public void testSuggestDocument() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); id--; } @@ -280,8 +279,8 @@ public void testSuggestDocumentNoSource() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNull(option.getHit().getSourceAsMap()); id--; } @@ -317,8 +316,8 @@ public void testSuggestDocumentSourceFiltering() throws Exception { int id = numDocs; for (CompletionSuggestion.Entry.Option option : options) { assertThat(option.getText().toString(), equalTo("suggestion" + id)); - assertSearchHit(option.getHit(), hasId("" + id)); - assertSearchHit(option.getHit(), hasScore((id))); + assertThat(option.getHit(), hasId("" + id)); + assertThat(option.getHit(), hasScore((id))); assertNotNull(option.getHit().getSourceAsMap()); Set sourceFields = option.getHit().getSourceAsMap().keySet(); assertThat(sourceFields, contains("a")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 792d535dc4339..2d027e8bfece5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -143,7 +143,6 @@ import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.AssertingTransportInterceptor; import org.hamcrest.Matchers; import org.junit.After; import org.junit.AfterClass; @@ -1921,9 +1920,6 @@ protected Collection> getMockPlugins() { if (randomBoolean()) { mocks.add(MockSearchService.TestPlugin.class); } - if (randomBoolean()) { - mocks.add(AssertingTransportInterceptor.TestPlugin.class); - } if (randomBoolean()) { mocks.add(MockFieldFilterPlugin.class); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 09e849cf7ca6a..723184410f247 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -23,7 +23,6 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestBuilder; @@ -49,13 +48,6 @@ import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -65,18 +57,13 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.NotEqualMessageBuilder; -import org.elasticsearch.test.VersionUtils; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -88,9 +75,6 @@ import java.util.Map; import java.util.Set; -import static java.util.Collections.emptyList; -import static org.apache.lucene.util.LuceneTestCase.random; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; @@ -124,7 +108,6 @@ public static void assertNoTimeout(ClusterHealthResponse response) { public static void assertAcked(AcknowledgedResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } public static void assertAcked(DeleteIndexRequestBuilder builder) { @@ -133,7 +116,6 @@ public static void assertAcked(DeleteIndexRequestBuilder builder) { public static void assertAcked(DeleteIndexResponse response) { assertThat("Delete Index failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); } /** @@ -142,7 +124,6 @@ public static void assertAcked(DeleteIndexResponse response) { */ public static void assertAcked(CreateIndexResponse response) { assertThat(response.getClass().getSimpleName() + " failed - not acked", response.isAcknowledged(), equalTo(true)); - assertVersionSerializable(response); assertTrue(response.getClass().getSimpleName() + " failed - index creation acked but not all shards were started", response.isShardsAcknowledged()); } @@ -236,7 +217,6 @@ public static void assertSearchHits(SearchResponse searchResponse, String... ids } assertThat("Some expected ids were not found in search results: " + Arrays.toString(idsSet.toArray(new String[idsSet.size()])) + "." + shardStatus, idsSet.size(), equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertSortValues(SearchResponse searchResponse, Object[]... sortValues) { @@ -247,7 +227,6 @@ public static void assertSortValues(SearchResponse searchResponse, Object[]... 
s final Object[] hitsSortValues = hits[i].getSortValues(); assertArrayEquals("Offset " + Integer.toString(i) + ", id " + hits[i].getId(), sortValues[i], hitsSortValues); } - assertVersionSerializable(searchResponse); } public static void assertOrderedSearchHits(SearchResponse searchResponse, String... ids) { @@ -257,14 +236,12 @@ public static void assertOrderedSearchHits(SearchResponse searchResponse, String SearchHit hit = searchResponse.getHits().getHits()[i]; assertThat("Expected id: " + ids[i] + " at position " + i + " but wasn't." + shardStatus, hit.getId(), equalTo(ids[i])); } - assertVersionSerializable(searchResponse); } public static void assertHitCount(SearchResponse countResponse, long expectedHitCount) { if (countResponse.getHits().getTotalHits() != expectedHitCount) { fail("Count is " + countResponse.getHits().getTotalHits() + " but " + expectedHitCount + " was expected. " + formatShardStatus(countResponse)); } - assertVersionSerializable(countResponse); } public static void assertExists(GetResponse response) { @@ -296,26 +273,22 @@ public static void assertSearchHit(SearchResponse searchResponse, int number, Ma assertThat(number, greaterThan(0)); assertThat("SearchHit number must be greater than 0", number, greaterThan(0)); assertThat(searchResponse.getHits().getTotalHits(), greaterThanOrEqualTo((long) number)); - assertSearchHit(searchResponse.getHits().getAt(number - 1), matcher); - assertVersionSerializable(searchResponse); + assertThat(searchResponse.getHits().getAt(number - 1), matcher); } public static void assertNoFailures(SearchResponse searchResponse) { assertThat("Unexpected ShardFailures: " + Arrays.toString(searchResponse.getShardFailures()), searchResponse.getShardFailures().length, equalTo(0)); - assertVersionSerializable(searchResponse); } public static void assertFailures(SearchResponse searchResponse) { assertThat("Expected at least one shard failure, got none", searchResponse.getShardFailures().length, greaterThan(0)); - assertVersionSerializable(searchResponse); } public static void assertNoFailures(BulkResponse response) { assertThat("Unexpected ShardFailures: " + response.buildFailureMessage(), response.hasFailures(), is(false)); - assertVersionSerializable(response); } public static void assertFailures(SearchRequestBuilder searchRequestBuilder, RestStatus restStatus, Matcher reasonMatcher) { @@ -328,7 +301,6 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res assertThat(shardSearchFailure.status(), equalTo(restStatus)); assertThat(shardSearchFailure.reason(), reasonMatcher); } - assertVersionSerializable(searchResponse); } catch (SearchPhaseExecutionException e) { assertThat(e.status(), equalTo(restStatus)); assertThat(e.toString(), reasonMatcher); @@ -343,26 +315,18 @@ public static void assertFailures(SearchRequestBuilder searchRequestBuilder, Res public static void assertNoFailures(BroadcastResponse response) { assertThat("Unexpected ShardFailures: " + Arrays.toString(response.getShardFailures()), response.getFailedShards(), equalTo(0)); - assertVersionSerializable(response); } public static void assertAllSuccessful(BroadcastResponse response) { assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), equalTo(response.getTotalShards())); - assertVersionSerializable(response); } public static void assertAllSuccessful(SearchResponse response) { assertNoFailures(response); assertThat("Expected all shards successful", response.getSuccessfulShards(), 
equalTo(response.getTotalShards())); - assertVersionSerializable(response); - } - - public static void assertSearchHit(SearchHit searchHit, Matcher matcher) { - assertThat(searchHit, matcher); - assertVersionSerializable(searchHit); } public static void assertHighlight(SearchResponse resp, int hit, String field, int fragment, Matcher matcher) { @@ -385,7 +349,6 @@ private static void assertHighlight(SearchResponse resp, int hit, String field, assertNoFailures(resp); assertThat("not enough hits", resp.getHits().getHits().length, greaterThan(hit)); assertHighlight(resp.getHits().getHits()[hit], field, fragment, fragmentsMatcher, matcher); - assertVersionSerializable(resp); } private static void assertHighlight(SearchHit hit, String field, int fragment, Matcher fragmentsMatcher, Matcher matcher) { @@ -407,7 +370,6 @@ public static void assertSuggestionSize(Suggest searchSuggest, int entry, int si assertThat(msg, searchSuggest.getSuggestion(key).getName(), equalTo(key)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), equalTo(size)); - assertVersionSerializable(searchSuggest); } public static void assertSuggestionPhraseCollateMatchExists(Suggest searchSuggest, String key, int numberOfPhraseExists) { @@ -434,7 +396,6 @@ public static void assertSuggestion(Suggest searchSuggest, int entry, int ord, S assertThat(msg, searchSuggest.getSuggestion(key).getEntries().size(), greaterThanOrEqualTo(entry)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().size(), greaterThan(ord)); assertThat(msg, searchSuggest.getSuggestion(key).getEntries().get(entry).getOptions().get(ord).getText().string(), equalTo(text)); - assertVersionSerializable(searchSuggest); } /** @@ -638,151 +599,6 @@ public static void assertThrows(ActionFuture future, RestStatus status, String e } } - private static BytesReference serialize(Version version, Streamable streamable) throws IOException { - BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(version); - streamable.writeTo(output); - output.flush(); - return output.bytes(); - } - - public static void assertVersionSerializable(Streamable streamable) { - assertTrue(Version.CURRENT.after(VersionUtils.getPreviousVersion())); - assertVersionSerializable(randomVersion(random()), streamable); - } - - public static void assertVersionSerializable(Version version, Streamable streamable) { - /* - * If possible we fetch the NamedWriteableRegistry from the test cluster. That is the only way to make sure that we properly handle - * when plugins register names. If not possible we'll try and set up a registry based on whatever SearchModule registers. But that - * is a hack at best - it only covers some things. If you end up with errors below and get to this comment I'm sorry. Please find - * a way that sucks less. 
- */ - NamedWriteableRegistry registry; - if (ESIntegTestCase.isInternalCluster() && ESIntegTestCase.internalCluster().size() > 0) { - registry = ESIntegTestCase.internalCluster().getInstance(NamedWriteableRegistry.class); - } else { - SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); - registry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); - } - assertVersionSerializable(version, streamable, registry); - } - - public static void assertVersionSerializable(Version version, Streamable streamable, NamedWriteableRegistry namedWriteableRegistry) { - try { - Streamable newInstance = tryCreateNewInstance(streamable); - if (newInstance == null) { - return; // can't create a new instance - we never modify a - // streamable that comes in. - } - if (streamable instanceof ActionRequest) { - ((ActionRequest) streamable).validate(); - } - BytesReference orig; - try { - orig = serialize(version, streamable); - } catch (IllegalArgumentException e) { - // Can't serialize with this version so skip this test. - return; - } - StreamInput input = orig.streamInput(); - if (namedWriteableRegistry != null) { - input = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry); - } - input.setVersion(version); - // This is here since some Streamables are being converted into Writeables - // and the readFrom method throws an exception if called - Streamable newInstanceFromStream = tryCreateFromStream(streamable, input); - if (newInstanceFromStream == null) { - newInstance.readFrom(input); - } - assertThat("Stream should be fully read with version [" + version + "] for streamable [" + streamable + "]", input.available(), - equalTo(0)); - BytesReference newBytes = serialize(version, streamable); - if (false == orig.equals(newBytes)) { - // The bytes are different. That is a failure. Lets try to throw a useful exception for debugging. - String message = "Serialization failed with version [" + version + "] bytes should be equal for streamable [" + streamable - + "]"; - // If the bytes are different then comparing BytesRef's toStrings will show you *where* they are different - assertEquals(message, orig.toBytesRef().toString(), newBytes.toBytesRef().toString()); - // They bytes aren't different. Very very weird. 
- fail(message); - } - } catch (Exception ex) { - throw new RuntimeException("failed to check serialization - version [" + version + "] for streamable [" + streamable + "]", ex); - } - - } - - public static void assertVersionSerializable(Version version, final Exception e) { - ElasticsearchAssertions.assertVersionSerializable(version, new ExceptionWrapper(e)); - } - - public static final class ExceptionWrapper implements Streamable { - - private Exception exception; - - public ExceptionWrapper(Exception e) { - exception = e; - } - - public ExceptionWrapper() { - exception = null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - exception = in.readException(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeException(exception); - } - - } - - - private static Streamable tryCreateNewInstance(Streamable streamable) throws NoSuchMethodException, InstantiationException, - IllegalAccessException, InvocationTargetException { - try { - Class clazz = streamable.getClass(); - Constructor constructor = clazz.getConstructor(); - assertThat(constructor, Matchers.notNullValue()); - Streamable newInstance = constructor.newInstance(); - return newInstance; - } catch (Exception e) { - return null; - } - } - - /** - * This attemps to construct a new {@link Streamable} object that is in the process of - * being converted from {@link Streamable} to {@link Writeable}. Assuming this constructs - * the object successfully, #readFrom should not be called on the constructed object. - * - * @param streamable the object to retrieve the type of class to construct the new instance from - * @param in the stream to read the object from - * @return the newly constructed object from reading the stream - * @throws NoSuchMethodException if constuctor cannot be found - * @throws InstantiationException if the class represents an abstract class - * @throws IllegalAccessException if this {@code Constructor} object - * is enforcing Java language access control and the underlying - * constructor is inaccessible. - * @throws InvocationTargetException if the underlying constructor - * throws an exception. - */ - private static Streamable tryCreateFromStream(Streamable streamable, StreamInput in) throws NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { - try { - Class clazz = streamable.getClass(); - Constructor constructor = clazz.getConstructor(StreamInput.class); - return constructor.newInstance(in); - } catch (NoSuchMethodException e) { - return null; - } - } - /** * Applies basic assertions on the SearchResponse. This method checks if all shards were successful, if * any of the shards threw an exception and if the response is serializable. diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java b/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java deleted file mode 100644 index bbb6c9567362d..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/transport/AssertingTransportInterceptor.java +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.transport; - -import org.elasticsearch.Version; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.plugins.NetworkPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.Random; - -/** - * A transport interceptor that applies {@link ElasticsearchAssertions#assertVersionSerializable(Streamable)} - * to all requests and response objects send across the wire - */ -public final class AssertingTransportInterceptor implements TransportInterceptor { - - private final Random random; - private final NamedWriteableRegistry namedWriteableRegistry; - - public static final class TestPlugin extends Plugin implements NetworkPlugin { - - private final Settings settings; - - public TestPlugin(Settings settings) { - this.settings = settings; - } - - @Override - public List getTransportInterceptors(NamedWriteableRegistry namedWriteableRegistry, - ThreadContext threadContext) { - return Collections.singletonList(new AssertingTransportInterceptor(settings, namedWriteableRegistry)); - } - } - - public AssertingTransportInterceptor(Settings settings, NamedWriteableRegistry namedWriteableRegistry) { - final long seed = ESIntegTestCase.INDEX_TEST_SEED_SETTING.get(settings); - random = new Random(seed); - this.namedWriteableRegistry = namedWriteableRegistry; - } - - @Override - public TransportRequestHandler interceptHandler(String action, String executor, - boolean forceExecution, - TransportRequestHandler actualHandler) { - return new TransportRequestHandler() { - - @Override - public void messageReceived(T request, TransportChannel channel, Task task) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel, task); - } - - @Override - public void messageReceived(T request, TransportChannel channel) throws Exception { - assertVersionSerializable(request); - actualHandler.messageReceived(request, channel); - } - }; - } - - private void assertVersionSerializable(Streamable streamable) { - Version version = VersionUtils.randomVersionBetween(random, Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); - ElasticsearchAssertions.assertVersionSerializable(version, streamable, namedWriteableRegistry); - - } - - @Override - public AsyncSender interceptSender(final AsyncSender sender) { - return new AsyncSender() { - @Override - public void sendRequest(Transport.Connection connection, String action, TransportRequest request, - TransportRequestOptions options, - final TransportResponseHandler handler) { - assertVersionSerializable(request); - 
sender.sendRequest(connection, action, request, options, new TransportResponseHandler() { - @Override - public T read(StreamInput in) throws IOException { - return handler.read(in); - } - - @Override - public void handleResponse(T response) { - assertVersionSerializable(response); - handler.handleResponse(response); - } - - @Override - public void handleException(TransportException exp) { - handler.handleException(exp); - } - - @Override - public String executor() { - return handler.executor(); - } - }); - } - }; - } - - -} diff --git a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java index 705f86fbb0797..dc4f135b71461 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertionsTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.test.hamcrest; -import org.elasticsearch.Version; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,32 +29,10 @@ import java.io.IOException; -import static java.util.Collections.emptyList; -import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertVersionSerializable; import static org.hamcrest.Matchers.containsString; public class ElasticsearchAssertionsTests extends ESTestCase { - public void testAssertVersionSerializableIsOkWithIllegalArgumentException() { - Version version = randomVersion(random()); - NamedWriteableRegistry registry = new NamedWriteableRegistry(emptyList()); - Streamable testStreamable = new TestStreamable(); - - // Should catch the exception and do nothing. - assertVersionSerializable(version, testStreamable, registry); - } - - public static class TestStreamable implements Streamable { - @Override - public void readFrom(StreamInput in) throws IOException { - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - throw new IllegalArgumentException("Not supported."); - } - } public void testAssertXContentEquivalent() throws IOException { try (XContentBuilder original = JsonXContent.contentBuilder()) { From e70cd35bdac1b17d40b15378fec4c14160b68bf4 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Fri, 30 Mar 2018 16:26:46 -0700 Subject: [PATCH 28/68] Revert "REST high-level client: add support for Indices Update Settings API (#28892)" (#29323) This reverts commit b67b5b1bbd1ec4c8246cca3f5155145d61c734e9. 
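The revert removes the dedicated putSettings and putSettingsAsync methods from the high-level REST client, but the indices.put_settings endpoint itself is untouched, so settings updates remain available through the low-level RestClient. A minimal sketch under that assumption — the host, port, and the index name "index1" are hypothetical and not part of this patch:

import java.io.IOException;
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class LowLevelPutSettingsExample {
    public static void main(String[] args) throws IOException {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Dynamic setting update; the body may be sent bare or wrapped in a
            // top-level "settings" object, which the server unwraps.
            NStringEntity body = new NStringEntity(
                    "{\"index\": {\"number_of_replicas\": 0}}", ContentType.APPLICATION_JSON);
            Response response = client.performRequest(
                    "PUT", "/index1/_settings", Collections.emptyMap(), body);
            System.out.println(response.getStatusLine());
        }
    }
}

The unwrapping of a top-level "settings" object is the behavior RestUpdateSettingsAction reinstates in its hunk below, and flags such as preserve_existing from the indices.put_settings spec can be passed through the params map in the same way.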
--- .../resources/checkstyle_suppressions.xml | 1 + .../elasticsearch/client/IndicesClient.java | 26 ---- .../org/elasticsearch/client/Request.java | 24 +-- .../elasticsearch/client/IndicesClientIT.java | 99 ------------ .../elasticsearch/client/RequestTests.java | 28 ---- .../IndicesClientDocumentationIT.java | 110 -------------- .../high-level/cluster/put_settings.asciidoc | 2 +- .../high-level/indices/put_settings.asciidoc | 142 ------------------ .../high-level/search/search.asciidoc | 2 +- .../high-level/supported-apis.asciidoc | 2 - .../api/indices.put_settings.json | 12 +- .../settings/put/UpdateSettingsRequest.java | 80 +--------- .../settings/put/UpdateSettingsResponse.java | 14 -- .../support/master/AcknowledgedRequest.java | 18 +-- .../support/master/MasterNodeRequest.java | 19 --- .../indices/RestUpdateSettingsAction.java | 16 +- .../UpdateSettingsRequestStreamableTests.java | 112 -------------- .../put/UpdateSettingsRequestTests.java | 87 ----------- .../put/UpdateSettingsResponseTests.java | 46 ------ .../test/rest/ESRestTestCase.java | 16 -- 20 files changed, 26 insertions(+), 830 deletions(-) delete mode 100644 docs/java-rest/high-level/indices/put_settings.asciidoc delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 58df6cd7503e9..b1ef76c9d6a0e 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -151,6 +151,7 @@ + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index ff9c612e1d475..f5b46a6a53192 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -45,8 +45,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -408,28 +406,4 @@ public void rolloverAsync(RolloverRequest rolloverRequest, ActionListener - * See Update Indices Settings - * API on elastic.co - */ - public UpdateSettingsResponse putSettings(UpdateSettingsRequest updateSettingsRequest, Header... headers) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(updateSettingsRequest, Request::indexPutSettings, - UpdateSettingsResponse::fromXContent, emptySet(), headers); - } - - /** - * Asynchronously updates specific index level settings using the Update Indices Settings API - *
- * See Update Indices Settings - * API on elastic.co - */ - public void putSettingsAsync(UpdateSettingsRequest updateSettingsRequest, ActionListener listener, - Header... headers) { - restHighLevelClient.performRequestAsyncAndParseEntity(updateSettingsRequest, Request::indexPutSettings, - UpdateSettingsResponse::fromXContent, listener, emptySet(), headers); - } - } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java index 7b8574258c706..802b1492be092 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/Request.java @@ -43,7 +43,6 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -599,7 +598,7 @@ static Request rollover(RolloverRequest rolloverRequest) throws IOException { } static Request indicesExist(GetIndexRequest request) { - // this can be called with no indices as argument by transport client, not via REST though + //this can be called with no indices as argument by transport client, not via REST though if (request.indices() == null || request.indices().length == 0) { throw new IllegalArgumentException("indices are mandatory"); } @@ -613,20 +612,6 @@ static Request indicesExist(GetIndexRequest request) { return new Request(HttpHead.METHOD_NAME, endpoint, params.getParams(), null); } - static Request indexPutSettings(UpdateSettingsRequest updateSettingsRequest) throws IOException { - Params parameters = Params.builder(); - parameters.withTimeout(updateSettingsRequest.timeout()); - parameters.withMasterTimeout(updateSettingsRequest.masterNodeTimeout()); - parameters.withIndicesOptions(updateSettingsRequest.indicesOptions()); - parameters.withFlatSettings(updateSettingsRequest.flatSettings()); - parameters.withPreserveExisting(updateSettingsRequest.isPreserveExisting()); - - String[] indices = updateSettingsRequest.indices() == null ? 
Strings.EMPTY_ARRAY : updateSettingsRequest.indices(); - String endpoint = endpoint(indices, "_settings"); - HttpEntity entity = createEntity(updateSettingsRequest, REQUEST_BODY_CONTENT_TYPE); - return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity); - } - private static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); @@ -848,13 +833,6 @@ Params withIncludeDefaults(boolean includeDefaults) { return this; } - Params withPreserveExisting(boolean preserveExisting) { - if (preserveExisting) { - return putParam("preserve_existing", Boolean.TRUE.toString()); - } - return this; - } - Map getParams() { return Collections.unmodifiableMap(params); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 0feb78d66b2dd..7a29a35d20ab1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -49,8 +49,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -58,8 +56,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -67,7 +63,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -77,7 +72,6 @@ import static org.hamcrest.CoreMatchers.hasItem; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.startsWith; public class IndicesClientIT extends ESRestHighLevelClientTestCase { @@ -615,97 +609,4 @@ public void testRollover() throws IOException { assertEquals("test_new", rolloverResponse.getNewIndex()); } } - - public void testIndexPutSettings() throws IOException { - - final Setting dynamicSetting = IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING; - final String dynamicSettingKey = IndexMetaData.SETTING_NUMBER_OF_REPLICAS; - final int dynamicSettingValue = 0; - - final Setting staticSetting = IndexSettings.INDEX_CHECK_ON_STARTUP; - final String staticSettingKey = IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(); - final String staticSettingValue = 
"true"; - - final Setting unmodifiableSetting = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING; - final String unmodifiableSettingKey = IndexMetaData.SETTING_NUMBER_OF_SHARDS; - final int unmodifiableSettingValue = 3; - - String index = "index"; - createIndex(index, Settings.EMPTY); - - assertThat(dynamicSetting.getDefault(Settings.EMPTY), not(dynamicSettingValue)); - UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); - dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); - UpdateSettingsResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, - highLevelClient().indices()::putSettingsAsync); - - assertTrue(response.isAcknowledged()); - Map indexSettingsAsMap = getIndexSettingsAsMap(index); - assertThat(indexSettingsAsMap.get(dynamicSettingKey), equalTo(String.valueOf(dynamicSettingValue))); - - assertThat(staticSetting.getDefault(Settings.EMPTY), not(staticSettingValue)); - UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); - staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); - ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, - highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); - assertThat(exception.getMessage(), - startsWith("Elasticsearch exception [type=illegal_argument_exception, " - + "reason=Can't update non dynamic settings [[index.shard.check_on_startup]] for open indices [[index/")); - - indexSettingsAsMap = getIndexSettingsAsMap(index); - assertNull(indexSettingsAsMap.get(staticSettingKey)); - - closeIndex(index); - response = execute(staticSettingRequest, highLevelClient().indices()::putSettings, - highLevelClient().indices()::putSettingsAsync); - assertTrue(response.isAcknowledged()); - openIndex(index); - indexSettingsAsMap = getIndexSettingsAsMap(index); - assertThat(indexSettingsAsMap.get(staticSettingKey), equalTo(staticSettingValue)); - - assertThat(unmodifiableSetting.getDefault(Settings.EMPTY), not(unmodifiableSettingValue)); - UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); - unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); - exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, - highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); - assertThat(exception.getMessage(), startsWith( - "Elasticsearch exception [type=illegal_argument_exception, " - + "reason=Can't update non dynamic settings [[index.number_of_shards]] for open indices [[index/")); - closeIndex(index); - exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, - highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); - assertThat(exception.getMessage(), startsWith( - "Elasticsearch exception [type=illegal_argument_exception, " - + "reason=final index setting [index.number_of_shards], not updateable")); - } - - @SuppressWarnings("unchecked") - private Map getIndexSettingsAsMap(String index) throws IOException { - Map indexSettings = getIndexSettings(index); - return (Map)((Map) indexSettings.get(index)).get("settings"); - } - - public void testIndexPutSettingNonExistent() throws IOException { - - String index = "index"; - UpdateSettingsRequest indexUpdateSettingsRequest = 
new UpdateSettingsRequest(index); - String setting = "no_idea_what_you_are_talking_about"; - int value = 10; - indexUpdateSettingsRequest.settings(Settings.builder().put(setting, value).build()); - - ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, - highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); - assertEquals(RestStatus.NOT_FOUND, exception.status()); - assertThat(exception.getMessage(), equalTo("Elasticsearch exception [type=index_not_found_exception, reason=no such index]")); - - createIndex(index, Settings.EMPTY); - exception = expectThrows(ElasticsearchException.class, () -> execute(indexUpdateSettingsRequest, - highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); - assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(exception.getMessage(), equalTo( - "Elasticsearch exception [type=illegal_argument_exception, " - + "reason=unknown setting [index.no_idea_what_you_are_talking_about] please check that any required plugins are installed, " - + "or check the breaking changes documentation for removed settings]")); - } - } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java index 920fcd8cdb06b..75ac543fbb4ce 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.bulk.BulkRequest; @@ -1364,33 +1363,6 @@ public void testRollover() throws IOException { assertEquals(expectedParams, request.getParameters()); } - public void testIndexPutSettings() throws IOException { - String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 2); - UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indices); - Map expectedParams = new HashMap<>(); - setRandomFlatSettings(updateSettingsRequest::flatSettings, expectedParams); - setRandomMasterTimeout(updateSettingsRequest, expectedParams); - setRandomTimeout(updateSettingsRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams); - setRandomIndicesOptions(updateSettingsRequest::indicesOptions, updateSettingsRequest::indicesOptions, expectedParams); - if (randomBoolean()) { - updateSettingsRequest.setPreserveExisting(randomBoolean()); - if (updateSettingsRequest.isPreserveExisting()) { - expectedParams.put("preserve_existing", "true"); - } - } - - Request request = Request.indexPutSettings(updateSettingsRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_settings"); - assertThat(endpoint.toString(), equalTo(request.getEndpoint())); - assertEquals(HttpPut.METHOD_NAME, request.getMethod()); - assertToXContentBody(updateSettingsRequest, request.getEntity()); - assertEquals(expectedParams, request.getParameters()); - } - private static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index e33d1e4729b0e..bc6946eb2dc7f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -48,8 +48,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeType; @@ -58,7 +56,6 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -397,7 +394,6 @@ public void testCreateIndexAsync() throws Exception { // tag::create-index-execute-listener ActionListener listener = new ActionListener() { - @Override public void onResponse(CreateIndexResponse createIndexResponse) { // <1> @@ -1382,110 +1378,4 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - - public void testIndexPutSettings() throws Exception { - RestHighLevelClient client = highLevelClient(); - - { - 
CreateIndexResponse createIndexResponse = client.indices().create(new CreateIndexRequest("index")); - assertTrue(createIndexResponse.isAcknowledged()); - } - - // tag::put-settings-request - UpdateSettingsRequest request = new UpdateSettingsRequest("index1"); // <1> - UpdateSettingsRequest requestMultiple = - new UpdateSettingsRequest("index1", "index2"); // <2> - UpdateSettingsRequest requestAll = new UpdateSettingsRequest(); // <3> - // end::put-settings-request - - // tag::put-settings-create-settings - String settingKey = "index.number_of_replicas"; - int settingValue = 0; - Settings settings = - Settings.builder() - .put(settingKey, settingValue) - .build(); // <1> - // end::put-settings-create-settings - // tag::put-settings-request-index-settings - request.settings(settings); - // end::put-settings-request-index-settings - - { - // tag::put-settings-settings-builder - Settings.Builder settingsBuilder = - Settings.builder() - .put(settingKey, settingValue); - request.settings(settingsBuilder); // <1> - // end::put-settings-settings-builder - } - { - // tag::put-settings-settings-map - Map map = new HashMap<>(); - map.put(settingKey, settingValue); - request.settings(map); // <1> - // end::put-settings-settings-map - } - { - // tag::put-settings-settings-source - request.settings( - "{\"index.number_of_replicas\": \"2\"}" - , XContentType.JSON); // <1> - // end::put-settings-settings-source - } - - // tag::put-settings-request-flat-settings - request.flatSettings(true); // <1> - // end::put-settings-request-flat-settings - // tag::put-settings-request-preserveExisting - request.setPreserveExisting(false); // <1> - // end::put-settings-request-preserveExisting - // tag::put-settings-request-timeout - request.timeout(TimeValue.timeValueMinutes(2)); // <1> - request.timeout("2m"); // <2> - // end::put-settings-request-timeout - // tag::put-settings-request-masterTimeout - request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> - request.masterNodeTimeout("1m"); // <2> - // end::put-settings-request-masterTimeout - // tag::put-settings-request-indicesOptions - request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::put-settings-request-indicesOptions - - // tag::put-settings-execute - UpdateSettingsResponse updateSettingsResponse = - client.indices().putSettings(request); - // end::put-settings-execute - - // tag::put-settings-response - boolean acknowledged = updateSettingsResponse.isAcknowledged(); // <1> - // end::put-settings-response - assertTrue(acknowledged); - - // tag::put-settings-execute-listener - ActionListener listener = - new ActionListener() { - - @Override - public void onResponse(UpdateSettingsResponse updateSettingsResponse) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::put-settings-execute-listener - - // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); - - // tag::put-settings-execute-async - client.indices().putSettingsAsync(request,listener); // <1> - // end::put-settings-execute-async - - assertTrue(latch.await(30L, TimeUnit.SECONDS)); - } - } diff --git a/docs/java-rest/high-level/cluster/put_settings.asciidoc b/docs/java-rest/high-level/cluster/put_settings.asciidoc index 74b479faa0501..2d9f55c1e9419 100644 --- a/docs/java-rest/high-level/cluster/put_settings.asciidoc +++ b/docs/java-rest/high-level/cluster/put_settings.asciidoc @@ -58,7 +58,7 @@ The following 
arguments can optionally be provided: -------------------------------------------------- include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[put-settings-request-flat-settings] -------------------------------------------------- -<1> Whether the updated settings returned in the `ClusterUpdateSettings` should +<1> Wether the updated settings returned in the `ClusterUpdateSettings` should be in a flat format ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/indices/put_settings.asciidoc b/docs/java-rest/high-level/indices/put_settings.asciidoc deleted file mode 100644 index 49312da82a400..0000000000000 --- a/docs/java-rest/high-level/indices/put_settings.asciidoc +++ /dev/null @@ -1,142 +0,0 @@ -[[java-rest-high-indices-put-settings]] -=== Update Indices Settings API - -The Update Indices Settings API allows to change specific index level settings. - -[[java-rest-high-indices-put-settings-request]] -==== Update Indices Settings Request - -An `UpdateSettingsRequest`: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request] --------------------------------------------------- -<1> Update settings for one index -<2> Update settings for multiple indices -<3> Update settings for all indices - -==== Indices Settings -At least one setting to be updated must be provided: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings] --------------------------------------------------- -<1> Sets the index settings to be applied - -==== Providing the Settings -The settings to be applied can be provided in different ways: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-create-settings] --------------------------------------------------- -<1> Creates a setting as `Settings` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-builder] --------------------------------------------------- -<1> Settings provided as `Settings.Builder` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-source] --------------------------------------------------- -<1> Settings provided as `String` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-settings-map] --------------------------------------------------- -<1> Settings provided as a `Map` - -==== Optional Arguments -The following arguments can optionally be provided: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-flat-settings] --------------------------------------------------- -<1> Whether the updated settings returned in the `UpdateSettings` should -be in a flat format - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- 
-include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-preserveExisting] --------------------------------------------------- -<1> Whether to update existing settings. If set to `true` existing settings -on an index remain unchanged, the default is `false` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-timeout] --------------------------------------------------- -<1> Timeout to wait for the all the nodes to acknowledge the new setting -as a `TimeValue` -<2> Timeout to wait for the all the nodes to acknowledge the new setting -as a `String` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-masterTimeout] --------------------------------------------------- -<1> Timeout to connect to the master node as a `TimeValue` -<2> Timeout to connect to the master node as a `String` - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-request-indicesOptions] --------------------------------------------------- -<1> Setting `IndicesOptions` controls how unavailable indices are resolved and -how wildcard expressions are expanded - -[[java-rest-high-indices-put-settings-sync]] -==== Synchronous Execution - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute] --------------------------------------------------- - -[[java-rest-high-indices-put-settings-async]] -==== Asynchronous Execution - -The asynchronous execution of an indices update settings requires both the -`UpdateSettingsRequest` instance and an `ActionListener` instance to be -passed to the asynchronous method: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-async] --------------------------------------------------- -<1> The `UpdateSettingsRequest` to execute and the `ActionListener` -to use when the execution completes - -The asynchronous method does not block and returns immediately. Once it is -completed the `ActionListener` is called back using the `onResponse` method -if the execution successfully completed or using the `onFailure` method if -it failed. - -A typical listener for `UpdateSettingsResponse` looks like: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-execute-listener] --------------------------------------------------- -<1> Called when the execution is successfully completed. The response is -provided as an argument -<2> Called in case of a failure. 
The raised exception is provided as an argument - -[[java-rest-high-indices-put-settings-response]] -==== Update Indices Settings Response - -The returned `UpdateSettingsResponse` allows to retrieve information about the -executed operation as follows: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/IndicesClientDocumentationIT.java[put-settings-response] --------------------------------------------------- -<1> Indicates whether all of the nodes have acknowledged the request \ No newline at end of file diff --git a/docs/java-rest/high-level/search/search.asciidoc b/docs/java-rest/high-level/search/search.asciidoc index 3e9472ff2cb58..af81775a90072 100644 --- a/docs/java-rest/high-level/search/search.asciidoc +++ b/docs/java-rest/high-level/search/search.asciidoc @@ -275,7 +275,7 @@ include-tagged::{doc-tests}/SearchDocumentationIT.java[search-execute-listener] The `SearchResponse` that is returned by executing the search provides details about the search execution itself as well as access to the documents returned. First, there is useful information about the request execution itself, like the -HTTP status code, execution time or whether the request terminated early or timed +HTTP status code, execution time or wether the request terminated early or timed out: ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 29052171cddc6..0330b1903c5bf 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -64,7 +64,6 @@ Index Management:: * <> * <> * <> -* <> Mapping Management:: * <> @@ -88,7 +87,6 @@ include::indices/rollover.asciidoc[] include::indices/put_mapping.asciidoc[] include::indices/update_aliases.asciidoc[] include::indices/exists_alias.asciidoc[] -include::indices/put_settings.asciidoc[] == Cluster APIs diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json index 3055cb8e32e2e..7c9cf627530ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.put_settings.json @@ -16,10 +16,6 @@ "type": "time", "description": "Specify timeout for connection to master" }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - }, "preserve_existing": { "type": "boolean", "description": "Whether to update existing settings. If set to `true` existing settings on an index remain unchanged, the default is `false`" @@ -38,10 +34,10 @@ "default": "open", "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both." 
}, - "flat_settings": { - "type": "boolean", - "description": "Return settings in flat format (default: false)" - } + "flat_settings": { + "type": "boolean", + "description": "Return settings in flat format (default: false)" + } } }, "body": { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 197e0db2d32ca..686bf8a74b85d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -28,34 +28,27 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; -import java.util.Arrays; -import java.util.HashMap; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; -import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; /** * Request for an update index settings action */ -public class UpdateSettingsRequest extends AcknowledgedRequest - implements IndicesRequest.Replaceable, ToXContentObject { +public class UpdateSettingsRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); private Settings settings = EMPTY_SETTINGS; private boolean preserveExisting = false; - private boolean flatSettings = false; public UpdateSettingsRequest() { } @@ -75,29 +68,6 @@ public UpdateSettingsRequest(Settings settings, String... indices) { this.settings = settings; } - /** - * Sets the value of "flat_settings". - * Used only by the high-level REST client. - * - * @param flatSettings - * value of "flat_settings" flag to be set - * @return this request - */ - public UpdateSettingsRequest flatSettings(boolean flatSettings) { - this.flatSettings = flatSettings; - return this; - } - - /** - * Return settings in flat format. - * Used only by the high-level REST client. - * - * @return true if settings need to be returned in flat format; false otherwise. 
- */ - public boolean flatSettings() { - return flatSettings; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -208,50 +178,4 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(preserveExisting); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - settings.toXContent(builder, params); - builder.endObject(); - return builder; - } - - public UpdateSettingsRequest fromXContent(XContentParser parser) throws IOException { - Map settings = new HashMap<>(); - Map bodySettings = parser.map(); - Object innerBodySettings = bodySettings.get("settings"); - // clean up in case the body is wrapped with "settings" : { ... } - if (innerBodySettings instanceof Map) { - @SuppressWarnings("unchecked") - Map innerBodySettingsMap = (Map) innerBodySettings; - settings.putAll(innerBodySettingsMap); - } else { - settings.putAll(bodySettings); - } - return this.settings(settings); - } - - @Override - public String toString() { - return "indices : " + Arrays.toString(indices) + "," + Strings.toString(this); - } - - @Override - public boolean equals(Object o) { - if (super.equals(o)) { - UpdateSettingsRequest that = (UpdateSettingsRequest) o; - return Objects.equals(settings, that.settings) - && Objects.equals(indicesOptions, that.indicesOptions) - && Objects.equals(preserveExisting, that.preserveExisting) - && Arrays.equals(indices, that.indices); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), settings, indicesOptions, preserveExisting, Arrays.hashCode(indices)); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java index 79116eb8cf5a7..b1475843aac5f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponse.java @@ -22,8 +22,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -32,13 +30,6 @@ */ public class UpdateSettingsResponse extends AcknowledgedResponse { - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "update_index_settings", true, args -> new UpdateSettingsResponse((boolean) args[0])); - - static { - declareAcknowledgedField(PARSER); - } - UpdateSettingsResponse() { } @@ -57,9 +48,4 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeAcknowledged(out); } - - public static UpdateSettingsResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java index 0bc58675fce58..900955b7b7d1e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/support/master/AcknowledgedRequest.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -import java.util.Objects; import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; @@ -32,8 +31,7 @@ * Abstract class that allows to mark action requests that support acknowledgements. * Facilitates consistency across different api. */ -public abstract class AcknowledgedRequest> extends MasterNodeRequest - implements AckedRequest { +public abstract class AcknowledgedRequest> extends MasterNodeRequest implements AckedRequest { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); @@ -89,18 +87,4 @@ public void writeTo(StreamOutput out) throws IOException { timeout.writeTo(out); } - @Override - public boolean equals(Object o) { - if (super.equals(o)) { - AcknowledgedRequest that = (AcknowledgedRequest) o; - return Objects.equals(timeout, that.timeout); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), timeout); - } - } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java index 545c2490a2b3c..2bad309f1cc3b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/MasterNodeRequest.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; -import java.util.Objects; /** * A based request for master based operation. @@ -77,22 +76,4 @@ public void readFrom(StreamInput in) throws IOException { super.readFrom(in); masterNodeTimeout = new TimeValue(in); } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - MasterNodeRequest that = (MasterNodeRequest) o; - return Objects.equals(masterNodeTimeout, that.masterNodeTimeout); - } - - @Override - public int hashCode() { - return Objects.hash(masterNodeTimeout); - } - } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 68f696b180267..93090ba25eee6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -57,7 +57,21 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.setPreserveExisting(request.paramAsBoolean("preserve_existing", updateSettingsRequest.isPreserveExisting())); updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); - updateSettingsRequest.fromXContent(request.contentParser()); + + Map settings = new HashMap<>(); + try (XContentParser parser = request.contentParser()) { + Map bodySettings = parser.map(); + Object innerBodySettings = bodySettings.get("settings"); + // clean up in case the body is wrapped with "settings" : { ... 
} + if (innerBodySettings instanceof Map) { + @SuppressWarnings("unchecked") + Map innerBodySettingsMap = (Map) innerBodySettings; + settings.putAll(innerBodySettingsMap); + } else { + settings.putAll(bodySettings); + } + } + updateSettingsRequest.settings(settings); return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java deleted file mode 100644 index 7b1029129b0ed..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestStreamableTests.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.settings.put; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.common.util.CollectionUtils; -import org.elasticsearch.test.AbstractStreamableTestCase; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Set; -import java.util.StringJoiner; - -public class UpdateSettingsRequestStreamableTests extends AbstractStreamableTestCase { - - @Override - protected UpdateSettingsRequest mutateInstance(UpdateSettingsRequest request) { - if (randomBoolean()) { - return new UpdateSettingsRequest(mutateSettings(request.settings()), request.indices()); - } - return new UpdateSettingsRequest(request.settings(), mutateIndices(request.indices())); - } - - @Override - protected UpdateSettingsRequest createTestInstance() { - return createTestItem(); - } - - @Override - protected UpdateSettingsRequest createBlankInstance() { - return new UpdateSettingsRequest(); - } - - public static UpdateSettingsRequest createTestItem() { - UpdateSettingsRequest request = randomBoolean() - ? 
new UpdateSettingsRequest(randomSettings(0, 2)) - : new UpdateSettingsRequest(randomSettings(0, 2), randomIndicesNames(0, 2)); - request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); - request.setPreserveExisting(randomBoolean()); - request.flatSettings(randomBoolean()); - return request; - } - - private static Settings mutateSettings(Settings settings) { - if (settings.isEmpty()) { - return randomSettings(1, 5); - } - Set allKeys = settings.keySet(); - List keysToBeModified = randomSubsetOf(randomIntBetween(1, allKeys.size()), allKeys); - Builder builder = Settings.builder(); - for (String key : allKeys) { - String value = settings.get(key); - if (keysToBeModified.contains(key)) { - value += randomAlphaOfLengthBetween(2, 5); - } - builder.put(key, value); - } - return builder.build(); - } - - private static String[] mutateIndices(String[] indices) { - if (CollectionUtils.isEmpty(indices)) { - return randomIndicesNames(1, 5); - } - String[] mutated = Arrays.copyOf(indices, indices.length); - Arrays.asList(mutated).replaceAll(i -> i += randomAlphaOfLengthBetween(2, 5)); - return mutated; - } - - private static Settings randomSettings(int min, int max) { - int num = randomIntBetween(min, max); - Builder builder = Settings.builder(); - for (int i = 0; i < num; i++) { - int keyDepth = randomIntBetween(1, 5); - StringJoiner keyJoiner = new StringJoiner(".", "", ""); - for (int d = 0; d < keyDepth; d++) { - keyJoiner.add(randomAlphaOfLengthBetween(3, 5)); - } - builder.put(keyJoiner.toString(), randomAlphaOfLengthBetween(2, 5)); - } - return builder.build(); - } - - private static String[] randomIndicesNames(int minIndicesNum, int maxIndicesNum) { - int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum); - String[] indices = new String[numIndices]; - for (int i = 0; i < numIndices; i++) { - indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT); - } - return indices; - } -} \ No newline at end of file diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java deleted file mode 100644 index ff75dbecd520c..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequestTests.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.settings.put; - -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractXContentTestCase; - -import java.io.IOException; -import java.util.function.Predicate; - -public class UpdateSettingsRequestTests extends AbstractXContentTestCase { - - private final boolean enclosedSettings = randomBoolean(); - - @Override - protected UpdateSettingsRequest createTestInstance() { - UpdateSettingsRequest testRequest = UpdateSettingsRequestStreamableTests.createTestItem(); - if (enclosedSettings) { - UpdateSettingsRequest requestWithEnclosingSettings = new UpdateSettingsRequest(testRequest.settings()) { - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startObject("settings"); - this.settings().toXContent(builder, params); - builder.endObject(); - builder.endObject(); - return builder; - } - }; - return requestWithEnclosingSettings; - } - return testRequest; - } - - @Override - protected UpdateSettingsRequest doParseInstance(XContentParser parser) throws IOException { - return new UpdateSettingsRequest().fromXContent(parser); - } - - @Override - protected boolean supportsUnknownFields() { - // if the settings are enclose as a "settings" object - // then all other top-level elements will be ignored during the parsing - return enclosedSettings; - } - - @Override - protected Predicate getRandomFieldsExcludeFilter() { - if (enclosedSettings) { - return field -> field.startsWith("settings"); - } - return field -> true; - } - - @Override - protected void assertEqualInstances(UpdateSettingsRequest expectedInstance, UpdateSettingsRequest newInstance) { - // here only the settings should be tested, as this test covers explicitly only the XContent parsing - // the rest of the request fields are tested by the StreamableTests - super.assertEqualInstances(new UpdateSettingsRequest(expectedInstance.settings()), - new UpdateSettingsRequest(newInstance.settings())); - } - - @Override - protected boolean assertToXContentEquivalence() { - // if enclosedSettings are used, disable the XContentEquivalence check as the - // parsed.toXContent is not equivalent to the test instance - return !enclosedSettings; - } - -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java deleted file mode 100644 index a3fb484f02e88..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsResponseTests.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.settings.put; - -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.test.AbstractStreamableXContentTestCase; - -public class UpdateSettingsResponseTests extends AbstractStreamableXContentTestCase { - - @Override - protected UpdateSettingsResponse doParseInstance(XContentParser parser) { - return UpdateSettingsResponse.fromXContent(parser); - } - - @Override - protected UpdateSettingsResponse createTestInstance() { - return new UpdateSettingsResponse(randomBoolean()); - } - - @Override - protected UpdateSettingsResponse createBlankInstance() { - return new UpdateSettingsResponse(); - } - - @Override - protected UpdateSettingsResponse mutateInstance(UpdateSettingsResponse response) { - return new UpdateSettingsResponse(response.isAcknowledged() == false); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 90a1d2c7f1df2..befc21eb1f697 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -32,7 +32,6 @@ import org.apache.http.ssl.SSLContexts; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -492,16 +491,6 @@ private static void updateIndexSettings(String index, Settings settings) throws new StringEntity(Strings.toString(settings), ContentType.APPLICATION_JSON))); } - protected static Map getIndexSettings(String index) throws IOException { - Map params = new HashMap<>(); - params.put("flat_settings", "true"); - Response response = client().performRequest(HttpGet.METHOD_NAME, index + "/_settings", params); - assertOK(response); - try (InputStream is = response.getEntity().getContent()) { - return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); - } - } - protected static boolean indexExists(String index) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, index); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); @@ -512,11 +501,6 @@ protected static void closeIndex(String index) throws IOException { assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); } - protected static void openIndex(String index) throws IOException { - Response response = client().performRequest(HttpPost.METHOD_NAME, index + "/_open"); - assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); - } - protected static boolean aliasExists(String alias) throws IOException { Response response = client().performRequest(HttpHead.METHOD_NAME, "/_alias/" + alias); return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode(); From 3ca9310aee889f54fd24196c118056ce6790a83c Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 2 Apr 2018 09:59:12 +0100 Subject: [PATCH 29/68] Update docs on vertex ordering (#27963) At time of writing, GeoJSON did not enforce a specific ordering of vertices in a polygon, but it now does. 
We occasionally get reports of Elasticsearch rejecting apparently-valid GeoJSON because of badly oriented polygons, and it's helpful to be able to point at this bit of the documentation when responding. --- .../mapping/types/geo-shape.asciidoc | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 7251361845af5..d7bf3e1f6798c 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -378,22 +378,24 @@ POST /example/doc // CONSOLE // TEST[skip:https://github.com/elastic/elasticsearch/issues/23836] -*IMPORTANT NOTE:* GeoJSON and WKT do not enforce a specific order for vertices -thus ambiguous polygons around the dateline and poles are possible. To alleviate -ambiguity the Open Geospatial Consortium (OGC) -http://www.opengeospatial.org/standards/sfa[Simple Feature Access] specification -defines the following vertex ordering: - -* Outer Ring - Counterclockwise -* Inner Ring(s) / Holes - Clockwise - -For polygons that do not cross the dateline, vertex order will not matter in -Elasticsearch. For polygons that do cross the dateline, Elasticsearch requires -vertex ordering to comply with the OGC specification. Otherwise, an unintended polygon -may be created and unexpected query/filter results will be returned. - -The following provides an example of an ambiguous polygon. Elasticsearch will apply -OGC standards to eliminate ambiguity resulting in a polygon that crosses the dateline. +*IMPORTANT NOTE:* WKT does not enforce a specific order for vertices thus +ambiguous polygons around the dateline and poles are possible. +https://tools.ietf.org/html/rfc7946#section-3.1.6[GeoJSON] mandates that the +outer polygon must be counterclockwise and interior shapes must be clockwise, +which agrees with the Open Geospatial Consortium (OGC) +http://www.opengeospatial.org/standards/sfa[Simple Feature Access] +specification for vertex ordering. + +Elasticsearch accepts both clockwise and counterclockwise polygons if they +appear not to cross the dateline (i.e. they cross less than 180° of longitude), +but for polygons that do cross the dateline (or for other polygons wider than +180°) Elasticsearch requires the vertex ordering to comply with the OGC and +GeoJSON specifications. Otherwise, an unintended polygon may be created and +unexpected query/filter results will be returned. + +The following provides an example of an ambiguous polygon. Elasticsearch will +apply the GeoJSON standard to eliminate ambiguity resulting in a polygon that +crosses the dateline. [source,js] -------------------------------------------------- From 40d19532bce4a8a43a9e940235febd10b9b9ad36 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 2 Apr 2018 10:03:42 +0100 Subject: [PATCH 30/68] Clarify expectations of false positives/negatives (#27964) Today this part of the documentation just says that Geo queries are not 100% accurate, but in fact we can be more precise about which kinds of queries see which kinds of error. This commit clarifies this point. 
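To make the vertex-ordering rule above concrete: whether a ring is wound counterclockwise can be checked with the signed-area ("shoelace") formula. The sketch below is illustrative only; it is not part of either patch, the class name is hypothetical, and it assumes a closed ring spanning less than 180° of longitude (the case in which, per the documentation above, Elasticsearch accepts either winding).

[source,java]
--------------------------------------------------
import java.util.List;

public class RingOrientation {

    /**
     * Returns true if the closed ring (points given as [longitude, latitude]
     * pairs, with the first point repeated at the end) is wound
     * counterclockwise, i.e. has positive signed area. Under the OGC/GeoJSON
     * convention, outer rings are counterclockwise and holes are clockwise.
     */
    public static boolean isCounterClockwise(List<double[]> ring) {
        double twiceSignedArea = 0.0;
        for (int i = 0; i < ring.size() - 1; i++) {
            double[] current = ring.get(i);
            double[] next = ring.get(i + 1);
            twiceSignedArea += current[0] * next[1] - next[0] * current[1];
        }
        return twiceSignedArea > 0;
    }
}
--------------------------------------------------

For a unit square traversed (0,0) → (1,0) → (1,1) → (0,1) → (0,0) the method returns true; reversing the traversal yields false.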
--- docs/reference/mapping/types/geo-shape.asciidoc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index d7bf3e1f6798c..43ad71e37073f 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -154,12 +154,12 @@ are provided: [float] ===== Accuracy -Geo_shape does not provide 100% accuracy and depending on how it is -configured it may return some false positives or false negatives for -certain queries. To mitigate this, it is important to select an -appropriate value for the tree_levels parameter and to adjust -expectations accordingly. For example, a point may be near the border of -a particular grid cell and may thus not match a query that only matches the +Geo_shape does not provide 100% accuracy and depending on how it is configured +it may return some false positives for `INTERSECTS`, `WITHIN` and `CONTAINS` +queries, and some false negatives for `DISJOINT` queries. To mitigate this, it +is important to select an appropriate value for the tree_levels parameter and +to adjust expectations accordingly. For example, a point may be near the border +of a particular grid cell and may thus not match a query that only matches the cell right next to it -- even though the shape is very close to the point. [float] From 3be960d1c26b7103fafc4a42ca5815e57a675920 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 2 Apr 2018 10:07:28 +0100 Subject: [PATCH 31/68] Minor cleanup in the InternalEngine (#29241) Fix a couple of minor things in the InternalEngine: * Rename loadOrGenerateHistoryUUID to reflect that it always generates a UUID * Move .acquire() call next to the associated try {} block. 
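The second point is an exception-safety detail: if any statement can throw between acquiring a reference-counted resource and entering the try block whose finally path releases it, a failure leaks the reference. A minimal sketch of the pattern, using a hypothetical Ref type rather than the engine's actual searcher classes:

[source,java]
--------------------------------------------------
import java.util.function.Supplier;

public class AcquireNextToTry {

    /** Hypothetical ref-counted resource, standing in for an acquired searcher. */
    interface Ref {
        void doWork();
        void release();
    }

    void useRef(Supplier<Ref> manager) {
        // Acquire immediately before the try block: no statement can throw
        // between the acquisition and the try, so the finally path below is
        // guaranteed to release whatever was acquired.
        Ref acquired = manager.get();
        try {
            // Assertions and any other potentially-throwing work belong here,
            // inside the try block, not between the acquire call and the try.
            acquired.doWork();
        } finally {
            acquired.release();
        }
    }
}
--------------------------------------------------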
--- .../elasticsearch/index/engine/InternalEngine.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 24d1fc16b702d..a873898d52c21 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -184,8 +184,7 @@ public InternalEngine(EngineConfig engineConfig) { new CombinedDeletionPolicy(logger, translogDeletionPolicy, translog::getLastSyncedGlobalCheckpoint); writer = createWriter(); bootstrapAppendOnlyInfoFromWriter(writer); - historyUUID = loadOrGenerateHistoryUUID(writer); - Objects.requireNonNull(historyUUID, "history uuid should not be null"); + historyUUID = loadHistoryUUID(writer); indexWriter = writer; } catch (IOException | TranslogCorruptedException e) { throw new EngineCreationFailureException(shardId, "failed to create engine", e); @@ -275,10 +274,11 @@ protected IndexSearcher refreshIfNeeded(IndexSearcher referenceToRefresh) throws // steal it by calling incRef on the "stolen" reader internalSearcherManager.maybeRefreshBlocking(); IndexSearcher acquire = internalSearcherManager.acquire(); - final IndexReader previousReader = referenceToRefresh.getIndexReader(); - assert previousReader instanceof ElasticsearchDirectoryReader: - "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; try { + final IndexReader previousReader = referenceToRefresh.getIndexReader(); + assert previousReader instanceof ElasticsearchDirectoryReader: + "searcher's IndexReader should be an ElasticsearchDirectoryReader, but got " + previousReader; + final IndexReader newReader = acquire.getIndexReader(); if (newReader == previousReader) { // nothing has changed - both ref managers share the same instance so we can use reference equality @@ -473,7 +473,7 @@ private String loadTranslogUUIDFromLastCommit() throws IOException { /** * Reads the current stored history ID from the IW commit data. */ - private String loadOrGenerateHistoryUUID(final IndexWriter writer) throws IOException { + private String loadHistoryUUID(final IndexWriter writer) throws IOException { final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY); if (uuid == null) { throw new IllegalStateException("commit doesn't contain history uuid"); From 1172b3b31b623fe64d679e22cc117e847dc39634 Mon Sep 17 00:00:00 2001 From: David Leatherman Date: Mon, 2 Apr 2018 14:24:25 -0400 Subject: [PATCH 32/68] Java versions for ci (#29320) * Add test matrix axis files for periodic java testing * Add properties file defining java versions to use * We have no openjdk8 * Remove openjdk Oracle Java and OpenJDK basically only differ in license, so we don't need to test both. --- .ci/java-versions.properties | 8 ++++++++ .ci/matrix-build-javas.yml | 9 +++++++++ .ci/matrix-java-exclusions.yml | 14 ++++++++++++++ .ci/matrix-runtime-javas.yml | 10 ++++++++++ 4 files changed, 41 insertions(+) create mode 100644 .ci/java-versions.properties create mode 100644 .ci/matrix-build-javas.yml create mode 100644 .ci/matrix-java-exclusions.yml create mode 100644 .ci/matrix-runtime-javas.yml diff --git a/.ci/java-versions.properties b/.ci/java-versions.properties new file mode 100644 index 0000000000000..a0713ce128e6f --- /dev/null +++ b/.ci/java-versions.properties @@ -0,0 +1,8 @@ +# This file is used with all of the non-matrix tests in Jenkins. 
+
+# This .properties file defines the versions of Java with which to
+# build and test Elasticsearch for this branch. Valid Java versions
+# are 'java' or 'openjdk' followed by the major release number.
+
+ES_BUILD_JAVA=java10
+ES_RUNTIME_JAVA=java8
diff --git a/.ci/matrix-build-javas.yml b/.ci/matrix-build-javas.yml
new file mode 100644
index 0000000000000..17aa4b0bf222a
--- /dev/null
+++ b/.ci/matrix-build-javas.yml
@@ -0,0 +1,9 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are included as an axis of the matrix.
+
+# This axis of the build matrix represents the versions of Java with
+# which Elasticsearch will be built. Valid Java versions are 'java'
+# or 'openjdk' followed by the major release number.
+
+ES_BUILD_JAVA:
+  - java10
diff --git a/.ci/matrix-java-exclusions.yml b/.ci/matrix-java-exclusions.yml
new file mode 100644
index 0000000000000..e2adf9f0955db
--- /dev/null
+++ b/.ci/matrix-java-exclusions.yml
@@ -0,0 +1,14 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are excluded from the test matrix.
+
+# The yaml mapping below represents a single intersection on the build
+# matrix where a test *should not* be run. The value of the exclude
+# key is a list of maps.
+
+# In this example all of the combinations defined in the matrix will
+# run except for the test that builds with java10 and runs with java8.
+# exclude:
+#   - ES_BUILD_JAVA: java10
+#     ES_RUNTIME_JAVA: java8
+
+exclude:
diff --git a/.ci/matrix-runtime-javas.yml b/.ci/matrix-runtime-javas.yml
new file mode 100644
index 0000000000000..72282ca805afd
--- /dev/null
+++ b/.ci/matrix-runtime-javas.yml
@@ -0,0 +1,10 @@
+# This file is used as part of a matrix build in Jenkins where the
+# values below are included as an axis of the matrix.
+
+# This axis of the build matrix represents the versions of Java on
+# which Elasticsearch will be tested. Valid Java versions are 'java'
+# or 'openjdk' followed by the major release number.
+
+ES_RUNTIME_JAVA:
+  - java8
+  - java10

From 6b2167f462a17372426e89938cca9b8aabfa9b97 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Mon, 2 Apr 2018 15:58:31 -0600
Subject: [PATCH 33/68] Begin moving XContent to a separate lib/artifact
 (#29300)

* Begin moving XContent to a separate lib/artifact

This commit moves a large portion of the XContent code from the
`server` project to the `libs/xcontent` project. For the pieces that
have been moved, some helpers have been duplicated to allow them to be
decoupled from ES helper classes. In addition, `Booleans` and
`CheckedFunction` have been moved to the `elasticsearch-core` project.

This decoupling is a step toward eventually making things like the
high-level REST client rely on only the parts of ES that they need,
rather than the entire ES jar.

Some pieces are still not decoupled; in particular, some of the
XContent tests remain in the server project because they test a large
portion of the pluggable xcontent pieces through
`XContentElasticsearchException`. They may be decoupled in future work.
Additionally, there may be more pieces that we want to move to the
xcontent lib in the future that are not part of this PR; this is a
starting point.
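Both relocated helpers are plain static utilities, so a usage sketch needs no server dependency. The example below assumes only the two methods whose full sources appear in the diff that follows, `Booleans.parseBoolean(String, boolean)` and `Glob.globMatch(String, String)`:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Glob;

public class CoreUtilsExample {

    public static void main(String[] args) {
        // Blank input falls back to the default value, via the hasText check.
        boolean useDefault = Booleans.parseBoolean("  ", true);      // true
        // Anything other than "true"/"false" throws IllegalArgumentException.
        boolean explicit = Booleans.parseBoolean("false", true);     // false

        // Glob supports "xxx*", "*xxx", "*xxx*" and "xxx*yyy" style patterns.
        boolean matched = Glob.globMatch("index-*", "index-2018");   // true

        System.out.println(useDefault + " " + explicit + " " + matched);
    }
}
--------------------------------------------------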
Relates to #28504 --- build.gradle | 1 + .../resources/checkstyle_suppressions.xml | 2 - .../org/elasticsearch/common/Booleans.java | 17 +++- .../elasticsearch/common/CheckedFunction.java | 0 .../java/org/elasticsearch/common/Glob.java | 70 +++++++++++++++ libs/x-content/build.gradle | 85 +++++++++++++++++++ .../x-content}/licenses/jackson-LICENSE | 0 .../x-content}/licenses/jackson-NOTICE | 0 .../licenses/jackson-core-2.8.10.jar.sha1 | 0 .../jackson-dataformat-cbor-2.8.10.jar.sha1 | 0 .../jackson-dataformat-smile-2.8.10.jar.sha1 | 0 .../jackson-dataformat-yaml-2.8.10.jar.sha1 | 0 .../licenses/snakeyaml-1.17.jar.sha1 | 0 .../x-content}/licenses/snakeyaml-LICENSE.txt | 0 .../x-content}/licenses/snakeyaml-NOTICE.txt | 0 .../org/elasticsearch/common/ParseField.java | 4 +- .../common/xcontent/ContextParser.java | 0 .../common/xcontent/DeprecationHandler.java | 0 .../NamedObjectNotFoundException.java | 0 .../xcontent/NamedXContentRegistry.java | 0 .../common/xcontent/ToXContent.java | 2 + .../common/xcontent/ToXContentFragment.java | 0 .../common/xcontent/ToXContentObject.java | 0 .../common/xcontent/XContent.java | 2 + .../common/xcontent/XContentBuilder.java | 46 ++++++++-- .../xcontent/XContentBuilderExtension.java | 0 .../common/xcontent/XContentFactory.java | 11 +-- .../common/xcontent/XContentGenerator.java | 51 +++++++++++ .../common/xcontent/XContentLocation.java | 0 .../xcontent/XContentParseException.java | 0 .../common/xcontent/XContentParser.java | 0 .../common/xcontent/XContentType.java | 0 .../common/xcontent/cbor/CborXContent.java | 4 +- .../xcontent/cbor/CborXContentGenerator.java | 0 .../xcontent/cbor/CborXContentParser.java | 0 .../common/xcontent/json/JsonXContent.java | 0 .../xcontent/json/JsonXContentGenerator.java | 73 +++++++++++++++- .../xcontent/json/JsonXContentParser.java | 0 .../common/xcontent/smile/SmileXContent.java | 3 +- .../smile/SmileXContentGenerator.java | 0 .../xcontent/smile/SmileXContentParser.java | 0 .../support/AbstractXContentParser.java | 37 +++++++- .../support/filtering/FilterPath.java | 4 +- .../filtering/FilterPathBasedFilter.java | 3 +- .../common/xcontent/yaml/YamlXContent.java | 0 .../xcontent/yaml/YamlXContentGenerator.java | 0 .../xcontent/yaml/YamlXContentParser.java | 0 .../elasticsearch/common/ParseFieldTests.java | 0 .../common/xcontent/XContentParserTests.java | 2 +- .../percolator/PercolateQueryBuilder.java | 2 +- server/build.gradle | 9 +- .../common/xcontent/Booleans.java | 46 ---------- .../common/xcontent/XContentHelper.java | 84 ------------------ .../ingest/PipelineConfiguration.java | 3 +- .../test/AbstractQueryTestCase.java | 7 +- .../ElasticsearchAssertionsTests.java | 3 +- 56 files changed, 395 insertions(+), 176 deletions(-) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/Booleans.java (94%) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/CheckedFunction.java (100%) create mode 100644 libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java create mode 100644 libs/x-content/build.gradle rename {server => libs/x-content}/licenses/jackson-LICENSE (100%) rename {server => libs/x-content}/licenses/jackson-NOTICE (100%) rename {server => libs/x-content}/licenses/jackson-core-2.8.10.jar.sha1 (100%) rename {server => libs/x-content}/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 (100%) rename {server => libs/x-content}/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 (100%) rename {server => 
libs/x-content}/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 (100%) rename {server => libs/x-content}/licenses/snakeyaml-1.17.jar.sha1 (100%) rename {server => libs/x-content}/licenses/snakeyaml-LICENSE.txt (100%) rename {server => libs/x-content}/licenses/snakeyaml-NOTICE.txt (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/ParseField.java (98%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java (98%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContent.java (99%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java (95%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java (96%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java (66%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/XContentType.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java (96%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java (84%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java (98%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java (90%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java (97%) rename {server => 
libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java (97%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java (100%) rename {server => libs/x-content}/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java (100%) rename {server => libs/x-content}/src/test/java/org/elasticsearch/common/ParseFieldTests.java (100%) rename {server => libs/x-content}/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java (99%) delete mode 100644 server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java diff --git a/build.gradle b/build.gradle index 94823e0ce5b1a..dce2adf5ee0bd 100644 --- a/build.gradle +++ b/build.gradle @@ -196,6 +196,7 @@ subprojects { "org.elasticsearch:elasticsearch-cli:${version}": ':server:cli', "org.elasticsearch:elasticsearch-core:${version}": ':libs:elasticsearch-core', "org.elasticsearch:elasticsearch-nio:${version}": ':libs:elasticsearch-nio', + "org.elasticsearch:elasticsearch-x-content:${version}": ':libs:x-content', "org.elasticsearch:elasticsearch-secure-sm:${version}": ':libs:secure-sm', "org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest', "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer', diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index b1ef76c9d6a0e..11f19f683e557 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -248,9 +248,7 @@ - - diff --git a/server/src/main/java/org/elasticsearch/common/Booleans.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java similarity index 94% rename from server/src/main/java/org/elasticsearch/common/Booleans.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java index 025174c477d64..7447f0111f7e2 100644 --- a/server/src/main/java/org/elasticsearch/common/Booleans.java +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Booleans.java @@ -73,6 +73,19 @@ public static boolean parseBoolean(String value) { throw new IllegalArgumentException("Failed to parse value [" + value + "] as only [true] or [false] are allowed."); } + private static boolean hasText(CharSequence str) { + if (str == null || str.length() == 0) { + return false; + } + int strLen = str.length(); + for (int i = 0; i < strLen; i++) { + if (!Character.isWhitespace(str.charAt(i))) { + return true; + } + } + return false; + } + /** * * @param value text to parse. 
@@ -80,14 +93,14 @@ public static boolean parseBoolean(String value) { * @return see {@link #parseBoolean(String)} */ public static boolean parseBoolean(String value, boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; } public static Boolean parseBoolean(String value, Boolean defaultValue) { - if (Strings.hasText(value)) { + if (hasText(value)) { return parseBoolean(value); } return defaultValue; diff --git a/server/src/main/java/org/elasticsearch/common/CheckedFunction.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/CheckedFunction.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/CheckedFunction.java diff --git a/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java new file mode 100644 index 0000000000000..f0baf75bd4db1 --- /dev/null +++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Glob.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +/** + * Utility class for glob-like matching + */ +public class Glob { + + /** + * Match a String against the given pattern, supporting the following simple + * pattern styles: "xxx*", "*xxx", "*xxx*" and "xxx*yyy" matches (with an + * arbitrary number of pattern parts), as well as direct equality. 
+ * + * @param pattern the pattern to match against + * @param str the String to match + * @return whether the String matches the given pattern + */ + public static boolean globMatch(String pattern, String str) { + if (pattern == null || str == null) { + return false; + } + int firstIndex = pattern.indexOf('*'); + if (firstIndex == -1) { + return pattern.equals(str); + } + if (firstIndex == 0) { + if (pattern.length() == 1) { + return true; + } + int nextIndex = pattern.indexOf('*', firstIndex + 1); + if (nextIndex == -1) { + return str.endsWith(pattern.substring(1)); + } else if (nextIndex == 1) { + // Double wildcard "**" - skipping the first "*" + return globMatch(pattern.substring(1), str); + } + String part = pattern.substring(1, nextIndex); + int partIndex = str.indexOf(part); + while (partIndex != -1) { + if (globMatch(pattern.substring(nextIndex), str.substring(partIndex + part.length()))) { + return true; + } + partIndex = str.indexOf(part, partIndex + 1); + } + return false; + } + return (str.length() >= firstIndex && + pattern.substring(0, firstIndex).equals(str.substring(0, firstIndex)) && + globMatch(pattern.substring(firstIndex), str.substring(firstIndex))); + } + +} diff --git a/libs/x-content/build.gradle b/libs/x-content/build.gradle new file mode 100644 index 0000000000000..c8b37108ff93c --- /dev/null +++ b/libs/x-content/build.gradle @@ -0,0 +1,85 @@ +import org.elasticsearch.gradle.precommit.PrecommitTasks + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +apply plugin: 'elasticsearch.build' +apply plugin: 'nebula.maven-base-publish' +apply plugin: 'nebula.maven-scm' + +archivesBaseName = 'elasticsearch-x-content' + +publishing { + publications { + nebula { + artifactId = archivesBaseName + } + } +} + +dependencies { + compile "org.elasticsearch:elasticsearch-core:${version}" + + compile "org.yaml:snakeyaml:${versions.snakeyaml}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" + compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" + + if (isEclipse == false || project.path == ":libs:x-content-tests") { + testCompile("org.elasticsearch.test:framework:${version}") { + exclude group: 'org.elasticsearch', module: 'elasticsearch-x-content' + } + } + +} + +forbiddenApisMain { + // x-content does not depend on server + // TODO: Need to decide how we want to handle for forbidden signatures with the changes to core + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +if (isEclipse) { + // in eclipse the project is under a fake root, we need to change around the source sets + sourceSets { + if (project.path == ":libs:x-content") { + main.java.srcDirs = ['java'] + main.resources.srcDirs = ['resources'] + } else { + test.java.srcDirs = ['java'] + test.resources.srcDirs = ['resources'] + } + } +} + +thirdPartyAudit.excludes = [ + // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) + 'com.fasterxml.jackson.databind.ObjectMapper', +] + +dependencyLicenses { + mapping from: /jackson-.*/, to: 'jackson' +} + +jarHell.enabled = false diff --git a/server/licenses/jackson-LICENSE b/libs/x-content/licenses/jackson-LICENSE similarity index 100% rename from server/licenses/jackson-LICENSE rename to libs/x-content/licenses/jackson-LICENSE diff --git a/server/licenses/jackson-NOTICE b/libs/x-content/licenses/jackson-NOTICE similarity index 100% rename from server/licenses/jackson-NOTICE rename to libs/x-content/licenses/jackson-NOTICE diff --git a/server/licenses/jackson-core-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-core-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-core-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 rename to libs/x-content/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 diff --git a/server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 similarity index 100% rename from server/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 rename to 
libs/x-content/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 diff --git a/server/licenses/snakeyaml-1.17.jar.sha1 b/libs/x-content/licenses/snakeyaml-1.17.jar.sha1 similarity index 100% rename from server/licenses/snakeyaml-1.17.jar.sha1 rename to libs/x-content/licenses/snakeyaml-1.17.jar.sha1 diff --git a/server/licenses/snakeyaml-LICENSE.txt b/libs/x-content/licenses/snakeyaml-LICENSE.txt similarity index 100% rename from server/licenses/snakeyaml-LICENSE.txt rename to libs/x-content/licenses/snakeyaml-LICENSE.txt diff --git a/server/licenses/snakeyaml-NOTICE.txt b/libs/x-content/licenses/snakeyaml-NOTICE.txt similarity index 100% rename from server/licenses/snakeyaml-NOTICE.txt rename to libs/x-content/licenses/snakeyaml-NOTICE.txt diff --git a/server/src/main/java/org/elasticsearch/common/ParseField.java b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/ParseField.java rename to libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java index 2c68ea7711bb2..084d82372c0ce 100644 --- a/server/src/main/java/org/elasticsearch/common/ParseField.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/ParseField.java @@ -35,6 +35,8 @@ public class ParseField { private String allReplacedWith = null; private final String[] allNames; + private static final String[] EMPTY = new String[0]; + /** * @param name * the primary name for this field. This will be returned by @@ -46,7 +48,7 @@ public class ParseField { public ParseField(String name, String... deprecatedNames) { this.name = name; if (deprecatedNames == null || deprecatedNames.length == 0) { - this.deprecatedNames = Strings.EMPTY_ARRAY; + this.deprecatedNames = EMPTY; } else { final HashSet set = new HashSet<>(); Collections.addAll(set, deprecatedNames); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ContextParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/DeprecationHandler.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedObjectNotFoundException.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/NamedXContentRegistry.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java 
b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java index f74bdec17a9f6..74542bb809f71 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentFragment.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ToXContentObject.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java similarity index 99% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java index 6f6ee4ffdda54..1eaaac104f29d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContent.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.xcontent; +import org.elasticsearch.common.Booleans; + import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java similarity index 95% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index 86b56f29e69be..eae5e48a557f3 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.xcontent; -import org.elasticsearch.common.util.CollectionUtils; - import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.Flushable; @@ -35,6 +33,7 @@ import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -740,7 +739,9 @@ private void unknownValue(Object value, boolean ensureNoSelfReferences) throws I //Path implements Iterable and causes endless recursion and a StackOverFlow if treated as an Iterable here value((Path) value); } else if (value instanceof Map) { - map((Map) value, ensureNoSelfReferences); + @SuppressWarnings("unchecked") + final Map valueMap = (Map) value; + map(valueMap, 
ensureNoSelfReferences); } else if (value instanceof Iterable) { value((Iterable) value, ensureNoSelfReferences); } else if (value instanceof Object[]) { @@ -799,7 +800,7 @@ private XContentBuilder map(Map values, boolean ensureNoSelfReference // checks that the map does not contain references to itself because // iterating over map entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startObject(); @@ -828,7 +829,7 @@ private XContentBuilder value(Iterable values, boolean ensureNoSelfReferences // checks that the iterable does not contain references to itself because // iterating over entries will cause a stackoverflow error if (ensureNoSelfReferences) { - CollectionUtils.ensureNoSelfReferences(values); + ensureNoSelfReferences(values); } startArray(); for (Object value : values) { @@ -937,4 +938,39 @@ static void ensureNotNull(Object value, String message) { throw new IllegalArgumentException(message); } } + + private static void ensureNoSelfReferences(Object value) { + Iterable it = convert(value); + if (it != null) { + ensureNoSelfReferences(it, value, Collections.newSetFromMap(new IdentityHashMap<>())); + } + } + + private static Iterable convert(Object value) { + if (value == null) { + return null; + } + if (value instanceof Map) { + return ((Map) value).values(); + } else if ((value instanceof Iterable) && (value instanceof Path == false)) { + return (Iterable) value; + } else if (value instanceof Object[]) { + return Arrays.asList((Object[]) value); + } else { + return null; + } + } + + private static void ensureNoSelfReferences(final Iterable value, Object originalReference, final Set ancestors) { + if (value != null) { + if (ancestors.add(originalReference) == false) { + throw new IllegalArgumentException("Iterable object is self-referencing itself"); + } + for (Object o : value) { + ensureNoSelfReferences(convert(o), o, ancestors); + } + ancestors.remove(originalReference); + } + } + } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentBuilderExtension.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index f9faa6f2b0658..fb871590df7fd 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,6 @@ import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.cbor.CborXContent; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.smile.SmileXContent; @@ -154,7 +153,8 @@ public static XContentType xContentType(CharSequence content) { return XContentType.JSON; } // Should we throw a 
failure here? Smile idea is to use it in bytes.... - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && content.charAt(2) == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && content.charAt(1) == SmileConstants.HEADER_BYTE_2 && + content.charAt(2) == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && content.charAt(1) == '-' && content.charAt(2) == '-') { @@ -186,7 +186,7 @@ public static XContentType xContentType(CharSequence content) { public static XContent xContent(CharSequence content) { XContentType type = xContentType(content); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -213,7 +213,7 @@ public static XContent xContent(byte[] data) { public static XContent xContent(byte[] data, int offset, int length) { XContentType type = xContentType(data, offset, length); if (type == null) { - throw new ElasticsearchParseException("Failed to derive xcontent"); + throw new XContentParseException("Failed to derive xcontent"); } return xContent(type); } @@ -278,7 +278,8 @@ public static XContentType xContentType(byte[] bytes, int offset, int length) { if (first == '{') { return XContentType.JSON; } - if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { + if (length > 2 && first == SmileConstants.HEADER_BYTE_1 && bytes[offset + 1] == SmileConstants.HEADER_BYTE_2 && + bytes[offset + 2] == SmileConstants.HEADER_BYTE_3) { return XContentType.SMILE; } if (length > 2 && first == '-' && bytes[offset + 1] == '-' && bytes[offset + 2] == '-') { diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java similarity index 66% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java index 905e511b64a49..142c1e399c78c 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentGenerator.java @@ -103,6 +103,57 @@ public interface XContentGenerator extends Closeable, Flushable { void copyCurrentStructure(XContentParser parser) throws IOException; + default void copyCurrentEvent(XContentParser parser) throws IOException { + switch (parser.currentToken()) { + case START_OBJECT: + writeStartObject(); + break; + case END_OBJECT: + writeEndObject(); + break; + case START_ARRAY: + writeStartArray(); + break; + case END_ARRAY: + writeEndArray(); + break; + case FIELD_NAME: + writeFieldName(parser.currentName()); + break; + case VALUE_STRING: + if (parser.hasTextCharacters()) { + writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); + } else { + writeString(parser.text()); + } + break; + case VALUE_NUMBER: + switch (parser.numberType()) { + case INT: + writeNumber(parser.intValue()); + break; + case LONG: + writeNumber(parser.longValue()); + break; + case FLOAT: + writeNumber(parser.floatValue()); + break; + case DOUBLE: + writeNumber(parser.doubleValue()); + break; + } + break; + case VALUE_BOOLEAN: + writeBoolean(parser.booleanValue()); + break; + case 
VALUE_NULL: + writeNull(); + break; + case VALUE_EMBEDDED_OBJECT: + writeBinary(parser.binaryValue()); + } + } + /** * Returns {@code true} if this XContentGenerator has been closed. A closed generator can not do any more output. */ diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParseException.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/XContentType.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentType.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java similarity index 96% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java index 58a9e9a98f833..34653e5634ab8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContent.java @@ -23,12 +23,12 @@ import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.dataformat.cbor.CBORFactory; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentGenerator; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -70,7 +70,7 @@ public XContentType type() { @Override public byte streamSeparator() { - throw new ElasticsearchParseException("cbor does not support stream parsing..."); + throw new XContentParseException("cbor does not support stream parsing..."); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java similarity index 100% 
rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/cbor/CborXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java similarity index 84% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java index 667a399096fd4..6f09174a573eb 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentGenerator.java @@ -28,16 +28,15 @@ import com.fasterxml.jackson.core.util.DefaultIndenter; import com.fasterxml.jackson.core.util.DefaultPrettyPrinter; import com.fasterxml.jackson.core.util.JsonGeneratorDelegate; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentGenerator; -import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.filtering.FilterPathBasedFilter; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.BufferedInputStream; import java.io.IOException; @@ -325,7 +324,7 @@ public void writeRawField(String name, InputStream content, XContentType content } else { writeStartRaw(name); flush(); - Streams.copy(content, os); + copyStream(content, os); writeEndRaw(); } } @@ -393,7 +392,40 @@ public void copyCurrentStructure(XContentParser parser) throws IOException { if (parser instanceof JsonXContentParser) { generator.copyCurrentStructure(((JsonXContentParser) parser).parser); } else { - XContentHelper.copyCurrentStructure(this, parser); + copyCurrentStructure(this, parser); + } + } + + /** + * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. 
+ */ + private static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { + XContentParser.Token token = parser.currentToken(); + + // Let's handle field-name separately first + if (token == XContentParser.Token.FIELD_NAME) { + destination.writeFieldName(parser.currentName()); + token = parser.nextToken(); + // fall-through to copy the associated value + } + + switch (token) { + case START_ARRAY: + destination.writeStartArray(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + copyCurrentStructure(destination, parser); + } + destination.writeEndArray(); + break; + case START_OBJECT: + destination.writeStartObject(); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + copyCurrentStructure(destination, parser); + } + destination.writeEndObject(); + break; + default: // others are simple: + destination.copyCurrentEvent(parser); } } @@ -423,4 +455,37 @@ public void close() throws IOException { public boolean isClosed() { return generator.isClosed(); } + + /** + * Copy the contents of the given InputStream to the given OutputStream. + * Closes both streams when done. + * + * @param in the stream to copy from + * @param out the stream to copy to + * @return the number of bytes copied + * @throws IOException in case of I/O errors + */ + private static long copyStream(InputStream in, OutputStream out) throws IOException { + Objects.requireNonNull(in, "No InputStream specified"); + Objects.requireNonNull(out, "No OutputStream specified"); + final byte[] buffer = new byte[8192]; + boolean success = false; + try { + long byteCount = 0; + int bytesRead; + while ((bytesRead = in.read(buffer)) != -1) { + out.write(buffer, 0, bytesRead); + byteCount += bytesRead; + } + out.flush(); + success = true; + return byteCount; + } finally { + if (success) { + IOUtils.close(in, out); + } else { + IOUtils.closeWhileHandlingException(in, out); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java similarity index 98% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java index caf6488eea398..5040f81cc130a 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContent.java @@ -53,7 +53,8 @@ public static XContentBuilder contentBuilder() throws IOException { static { smileFactory = new SmileFactory(); - smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); // for now, this is an overhead, might make sense for web sockets + // for now, this is an overhead, might make sense for web sockets + smileFactory.configure(SmileGenerator.Feature.ENCODE_BINARY_AS_7BIT, false); smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now... 
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/smile/SmileXContentParser.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java similarity index 90% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 008dca1b537ca..69d6736cea761 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -19,14 +19,15 @@ package org.elasticsearch.common.xcontent.support; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Numbers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; import java.nio.CharBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -178,6 +179,34 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + /** Return the long that {@code stringValue} stores or throws an exception if the + * stored value cannot be converted to a long that stores the exact same + * value and {@code coerce} is false. */ + private static long toLong(String stringValue, boolean coerce) { + try { + return Long.parseLong(stringValue); + } catch (NumberFormatException e) { + // we will try again with BigDecimal + } + + final BigInteger bigIntegerValue; + try { + BigDecimal bigDecimalValue = new BigDecimal(stringValue); + bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); + } catch (ArithmeticException e) { + throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); + } + + if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || + bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } + + return bigIntegerValue.longValue(); + } + @Override public long longValue() throws IOException { return longValue(DEFAULT_NUMBER_COERCE_POLICY); @@ -188,7 +217,7 @@ public long longValue(boolean coerce) throws IOException { Token token = currentToken(); if (token == Token.VALUE_STRING) { checkCoerceString(coerce, Long.class); - return Numbers.toLong(text(), coerce); + return toLong(text(), coerce); } long result = doLongValue(); ensureNumberConversion(coerce, result, Long.class); @@ -369,7 +398,7 @@ static List readList(XContentParser parser, MapFactory mapFactory) throw if (token == XContentParser.Token.START_ARRAY) { token = parser.nextToken(); } else { - throw new ElasticsearchParseException("Failed to parse list: expecting " + throw new XContentParseException(parser.getTokenLocation(), "Failed to parse list: expecting " + XContentParser.Token.START_ARRAY + " but got " + token); } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java index a70e385d52062..cd62280badbab 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPath.java @@ -20,7 +20,7 @@ package org.elasticsearch.common.xcontent.support.filtering; -import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.Glob; import java.util.ArrayList; import java.util.List; @@ -49,7 +49,7 @@ private FilterPath() { } public FilterPath matchProperty(String name) { - if ((next != null) && (simpleWildcard || doubleWildcard || Regex.simpleMatch(segment, name))) { + if ((next != null) && (simpleWildcard || doubleWildcard || Glob.globMatch(segment, name))) { return next; } return null; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java similarity index 97% rename from server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java index 846e172ae6678..5bce9e10c9609 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilterPathBasedFilter.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.xcontent.support.filtering; import com.fasterxml.jackson.core.filter.TokenFilter; -import 
org.elasticsearch.common.util.CollectionUtils; import java.util.ArrayList; import java.util.List; @@ -47,7 +46,7 @@ public class FilterPathBasedFilter extends TokenFilter { private final boolean inclusive; public FilterPathBasedFilter(FilterPath[] filters, boolean inclusive) { - if (CollectionUtils.isEmpty(filters)) { + if (filters == null || filters.length == 0) { throw new IllegalArgumentException("filters cannot be null or empty"); } this.inclusive = inclusive; diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContent.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentGenerator.java diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/yaml/YamlXContentParser.java diff --git a/server/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java similarity index 100% rename from server/src/test/java/org/elasticsearch/common/ParseFieldTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/ParseFieldTests.java diff --git a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java similarity index 99% rename from server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 1f38116f2f7c7..fe41352741e71 100644 --- a/server/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -123,7 +123,7 @@ private void assertReadListThrowsException(String source) { readList(source); fail("should have thrown a parse exception"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchParseException.class)); + assertThat(e, instanceOf(XContentParseException.class)); assertThat(e.getMessage(), containsString("Failed to parse list")); } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index d9b89ba339a0c..3ee163c8fc5a3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -349,7 +349,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep try 
(XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, document)) { parser.nextToken(); - XContentHelper.copyCurrentStructure(builder.generator(), parser); + builder.generator().copyCurrentStructure(parser); } } builder.endArray(); diff --git a/server/build.gradle b/server/build.gradle index 7b30f57d885e8..6042fb65ba021 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -63,6 +63,7 @@ dependencies { compile "org.elasticsearch:elasticsearch-core:${version}" compile "org.elasticsearch:elasticsearch-secure-sm:${version}" + compile "org.elasticsearch:elasticsearch-x-content:${version}" compileOnly project(':libs:plugin-classloader') testRuntime project(':libs:plugin-classloader') @@ -91,13 +92,6 @@ dependencies { // time handling, remove with java 8 time compile 'joda-time:joda-time:2.9.9' - // json and yaml - compile "org.yaml:snakeyaml:${versions.snakeyaml}" - compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-smile:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}" - compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" - // percentiles aggregation compile 'com.tdunning:t-digest:3.2' // precentil ranks aggregation @@ -295,7 +289,6 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) { dependencyLicenses { mapping from: /lucene-.*/, to: 'lucene' - mapping from: /jackson-.*/, to: 'jackson' dependencies = project.configurations.runtime.fileCollection { it.group.startsWith('org.elasticsearch') == false || // keep the following org.elasticsearch jars in diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java b/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java deleted file mode 100644 index 21c0ea5fdd08b..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/xcontent/Booleans.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.xcontent; - -/** - * Helpers for dealing with boolean values. Package-visible only so that only XContent classes use them. - */ -final class Booleans { - /** - * Parse {@code value} with values "true", "false", or null, returning the - * default value if null or the empty string is used. Any other input - * results in an {@link IllegalArgumentException} being thrown. 
- */ - static boolean parseBoolean(String value, Boolean defaultValue) { - if (value != null && value.length() > 0) { - switch (value) { - case "true": - return true; - case "false": - return false; - default: - throw new IllegalArgumentException("Failed to parse param [" + value + "] as only [true] or [false] are allowed."); - } - } else { - return defaultValue; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 6501f899c47bf..9c01c094b7a0d 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -287,90 +287,6 @@ private static boolean allListValuesAreMapsOfOne(List list) { return true; } - /** - * Low level implementation detail of {@link XContentGenerator#copyCurrentStructure(XContentParser)}. - */ - public static void copyCurrentStructure(XContentGenerator destination, XContentParser parser) throws IOException { - XContentParser.Token token = parser.currentToken(); - - // Let's handle field-name separately first - if (token == XContentParser.Token.FIELD_NAME) { - destination.writeFieldName(parser.currentName()); - token = parser.nextToken(); - // fall-through to copy the associated value - } - - switch (token) { - case START_ARRAY: - destination.writeStartArray(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - copyCurrentStructure(destination, parser); - } - destination.writeEndArray(); - break; - case START_OBJECT: - destination.writeStartObject(); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - copyCurrentStructure(destination, parser); - } - destination.writeEndObject(); - break; - default: // others are simple: - copyCurrentEvent(destination, parser); - } - } - - public static void copyCurrentEvent(XContentGenerator generator, XContentParser parser) throws IOException { - switch (parser.currentToken()) { - case START_OBJECT: - generator.writeStartObject(); - break; - case END_OBJECT: - generator.writeEndObject(); - break; - case START_ARRAY: - generator.writeStartArray(); - break; - case END_ARRAY: - generator.writeEndArray(); - break; - case FIELD_NAME: - generator.writeFieldName(parser.currentName()); - break; - case VALUE_STRING: - if (parser.hasTextCharacters()) { - generator.writeString(parser.textCharacters(), parser.textOffset(), parser.textLength()); - } else { - generator.writeString(parser.text()); - } - break; - case VALUE_NUMBER: - switch (parser.numberType()) { - case INT: - generator.writeNumber(parser.intValue()); - break; - case LONG: - generator.writeNumber(parser.longValue()); - break; - case FLOAT: - generator.writeNumber(parser.floatValue()); - break; - case DOUBLE: - generator.writeNumber(parser.doubleValue()); - break; - } - break; - case VALUE_BOOLEAN: - generator.writeBoolean(parser.booleanValue()); - break; - case VALUE_NULL: - generator.writeNull(); - break; - case VALUE_EMBEDDED_OBJECT: - generator.writeBinary(parser.binaryValue()); - } - } - /** * Writes a "raw" (bytes) field, handling cases where the bytes are compressed, and tries to optimize writing using * {@link XContentBuilder#rawField(String, InputStream)}. 
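Taken together, the hunks above fold the old static XContentHelper.copyCurrentStructure and copyCurrentEvent helpers into the generator itself, so callers now drive the copy through XContentGenerator#copyCurrentStructure(XContentParser). A minimal sketch of the resulting calling pattern, assuming only the post-move x-content API shown in these hunks (the sample JSON, class name, and output handling are invented for illustration):

    import java.io.ByteArrayOutputStream;

    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentFactory;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;

    public class CopyStructureSketch {
        public static void main(String[] args) throws Exception {
            String json = "{\"user\":{\"name\":\"kimchy\",\"roles\":[\"admin\",\"dev\"]}}";
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                     NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
                 XContentBuilder builder = XContentFactory.jsonBuilder(out)) {
                parser.nextToken();                               // move onto START_OBJECT before copying
                builder.generator().copyCurrentStructure(parser); // recurses through objects and arrays
            }
            System.out.println(out.toString("UTF-8"));            // echoes the original structure
        }
    }

For a JsonXContentParser the call short-circuits to Jackson's native copy; for any other parser it falls back to the token-by-token loop added above.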
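The AbstractXContentParser diff above also moves string-to-long coercion out of Numbers and into a private toLong helper backed by BigDecimal. A small hypothetical sketch of the observable behavior (the document literal is invented):

    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;

    public class LongCoercionSketch {
        public static void main(String[] args) throws Exception {
            try (XContentParser parser = JsonXContent.jsonXContent.createParser(
                     NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                     "{\"n\":\"42.0\"}")) {
                parser.nextToken(); // START_OBJECT
                parser.nextToken(); // FIELD_NAME "n"
                parser.nextToken(); // VALUE_STRING "42.0"
                System.out.println(parser.longValue(true)); // coerced through BigDecimal: prints 42
                // parser.longValue(false) would throw IllegalArgumentException instead,
                // since checkCoerceString rejects string tokens before toLong runs;
                // with coerce disabled, a value like "42.5" fails with "has a decimal part"
            }
        }
    }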
diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
index 95bfea87f8b26..737bad8ee5b0c 100644
--- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
+++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java
@@ -29,6 +29,7 @@
 import org.elasticsearch.common.xcontent.ContextParser;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -47,7 +48,7 @@ public final class PipelineConfiguration extends AbstractDiffable {
             XContentBuilder contentBuilder = XContentBuilder.builder(parser.contentType().xContent());
-            XContentHelper.copyCurrentStructure(contentBuilder.generator(), parser);
+            contentBuilder.generator().copyCurrentStructure(parser);
             builder.setConfig(BytesReference.bytes(contentBuilder), contentBuilder.contentType());
         }, new ParseField("config"), ObjectParser.ValueType.OBJECT);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
index 0037c23656f6c..04ac1d6cda026 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java
@@ -25,7 +25,6 @@
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.spans.SpanBoostQuery;
 import org.apache.lucene.util.Accountable;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -56,6 +55,7 @@
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentGenerator;
@@ -63,6 +63,7 @@
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.index.Index;
@@ -425,7 +426,7 @@ static List> alterateQueries(Set queries, Set> alterateQueries(Set queries, Set
Date: Mon, 2 Apr 2018 20:20:01 -0400
Subject: [PATCH 34/68] Remove HTTP max content length leniency (#29337)

I am not sure why we have this leniency for HTTP max content length; it has
been there since the beginning (5ac51ee93feab6c75fcbe979b9bb338962622c2e) with
no explanation of its source. That said, our philosophy today differs from
that of the past: where Elasticsearch was once quite lenient in its handling
of settings, today we aim for predictability for both users and ourselves.
This commit removes that leniency from the parsing of
http.max_content_length.
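A hedged sketch of the resulting validation, built from the bounded setting in the HttpTransportSettings hunk below (the wrapper class and example Settings object are invented):

    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Setting.Property;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    public class MaxContentLengthSketch {
        public static void main(String[] args) {
            Setting<ByteSizeValue> maxContentLength = Setting.byteSizeSetting(
                "http.max_content_length",
                new ByteSizeValue(100, ByteSizeUnit.MB),                  // default
                new ByteSizeValue(0, ByteSizeUnit.BYTES),                 // minimum
                new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES), // maximum
                Property.NodeScope);

            // The old code silently reset anything above Integer.MAX_VALUE to 100mb;
            // with the bounds in place, such a value is rejected at parse time:
            Settings tooLarge = Settings.builder().put("http.max_content_length", "4gb").build();
            maxContentLength.get(tooLarge); // throws IllegalArgumentException
        }
    }

The get call fails up front instead of taking the old warn-and-reset path removed from Netty4HttpServerTransport.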
---
 docs/reference/modules/http.asciidoc                        | 2 +-
 .../http/netty4/Netty4HttpServerTransport.java              | 5 -----
 .../java/org/elasticsearch/http/HttpTransportSettings.java  | 7 ++++++-
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc
index a83270ec2aace..920f62043cfe2 100644
--- a/docs/reference/modules/http.asciidoc
+++ b/docs/reference/modules/http.asciidoc
@@ -39,7 +39,7 @@ from the outside. Defaults to the actual port assigned via `http.port`.
 |`http.host` |Used to set the `http.bind_host` and the `http.publish_host` Defaults to `http.host` or `network.host`.
 
 |`http.max_content_length` |The max content of an HTTP request. Defaults to
-`100mb`. If set to greater than `Integer.MAX_VALUE`, it will be reset to 100mb.
+`100mb`.
 
 |`http.max_initial_line_length` |The max length of an HTTP URL. Defaults to
 `4kb`
diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
index 31b32a8ab948e..ab0c271f3ae4f 100644
--- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
+++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java
@@ -233,11 +233,6 @@ public Netty4HttpServerTransport(Settings settings, NetworkService networkServic
         this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
         this.corsConfig = buildCorsConfig(settings);
 
-        // validate max content length
-        if (maxContentLength.getBytes() > Integer.MAX_VALUE) {
-            logger.warn("maxContentLength[{}] set to high value, resetting it to [100mb]", maxContentLength);
-            maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB);
-        }
         this.maxContentLength = maxContentLength;
 
         logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
index 315fa5b038bfd..064406f0d389d 100644
--- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
+++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java
@@ -83,7 +83,12 @@ public final class HttpTransportSettings {
             return true;
         }, Property.NodeScope, Property.Deprecated);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH =
-        Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), Property.NodeScope);
+        Setting.byteSizeSetting(
+            "http.max_content_length",
+            new ByteSizeValue(100, ByteSizeUnit.MB),
+            new ByteSizeValue(0, ByteSizeUnit.BYTES),
+            new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES),
+            Property.NodeScope);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE =
         Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope);
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE =
From 782e41a67e10a4f1a7bb2bfebbdfd5c68f6385c7 Mon Sep 17 00:00:00 2001
From: Jack Conradson
Date: Mon, 2 Apr 2018 21:34:01 -0700
Subject: [PATCH 35/68] Painless: Remove extraneous INLINE constant.
(#29340) --- .../org/elasticsearch/painless/Location.java | 56 ++++++++----------- .../painless/PainlessScript.java | 9 +-- .../painless/PainlessScriptEngine.java | 9 +-- .../elasticsearch/painless/antlr/Walker.java | 2 +- .../elasticsearch/painless/node/SSource.java | 2 +- 5 files changed, 27 insertions(+), 51 deletions(-) diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java index f64200d972996..d90baa0655116 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Location.java @@ -27,9 +27,9 @@ public final class Location { private final String sourceName; private final int offset; - + /** - * Create a new Location + * Create a new Location * @param sourceName script's name * @param offset character offset of script element */ @@ -37,7 +37,7 @@ public Location(String sourceName, int offset) { this.sourceName = Objects.requireNonNull(sourceName); this.offset = offset; } - + /** * Return the script's name */ @@ -68,43 +68,31 @@ public RuntimeException createError(RuntimeException exception) { // This maximum length is theoretically 65535 bytes, but as it's CESU-8 encoded we don't know how large it is in bytes, so be safe private static final int MAX_NAME_LENGTH = 256; - + /** Computes the file name (mostly important for stacktraces) */ - public static String computeSourceName(String scriptName, String source) { + public static String computeSourceName(String scriptName) { StringBuilder fileName = new StringBuilder(); - if (scriptName.equals(PainlessScriptEngine.INLINE_NAME)) { - // its an anonymous script, include at least a portion of the source to help identify which one it is - // but don't create stacktraces with filenames that contain newlines or huge names. + // its an anonymous script, include at least a portion of the source to help identify which one it is + // but don't create stacktraces with filenames that contain newlines or huge names. - // truncate to the first newline - int limit = source.indexOf('\n'); - if (limit >= 0) { - int limit2 = source.indexOf('\r'); - if (limit2 >= 0) { - limit = Math.min(limit, limit2); - } - } else { - limit = source.length(); + // truncate to the first newline + int limit = scriptName.indexOf('\n'); + if (limit >= 0) { + int limit2 = scriptName.indexOf('\r'); + if (limit2 >= 0) { + limit = Math.min(limit, limit2); } + } else { + limit = scriptName.length(); + } - // truncate to our limit - limit = Math.min(limit, MAX_NAME_LENGTH); - fileName.append(source, 0, limit); + // truncate to our limit + limit = Math.min(limit, MAX_NAME_LENGTH); + fileName.append(scriptName, 0, limit); - // if we truncated, make it obvious - if (limit != source.length()) { - fileName.append(" ..."); - } - fileName.append(" @ "); - } else { - // its a named script, just use the name - // but don't trust this has a reasonable length! 
- if (scriptName.length() > MAX_NAME_LENGTH) { - fileName.append(scriptName, 0, MAX_NAME_LENGTH); - fileName.append(" ..."); - } else { - fileName.append(scriptName); - } + // if we truncated, make it obvious + if (limit != scriptName.length()) { + fileName.append(" ..."); } return fileName.toString(); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java index 9aab5c438b030..6139e66160ee6 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScript.java @@ -91,14 +91,7 @@ default ScriptException convertToScriptException(Throwable t, Map> entry : extraMetadata.entrySet()) { scriptException.addMetadata(entry.getKey(), entry.getValue()); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 95a38bf22c653..339e58c763c78 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -119,11 +119,6 @@ public String getType() { return NAME; } - /** - * When a script is anonymous (inline), we give it this name. - */ - static final String INLINE_NAME = ""; - @Override public T compile(String scriptName, String scriptSource, ScriptContext context, Map params) { Compiler compiler = contextsToCompilers.get(context); @@ -425,7 +420,7 @@ public Loader run() { return AccessController.doPrivileged(new PrivilegedAction() { @Override public Object run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? source : scriptName; Constructor constructor = compiler.compile(loader, new MainMethodReserved(), name, source, compilerSettings); try { @@ -488,7 +483,7 @@ void compile(Compiler compiler, Loader loader, MainMethodReserved reserved, AccessController.doPrivileged(new PrivilegedAction() { @Override public Void run() { - String name = scriptName == null ? INLINE_NAME : scriptName; + String name = scriptName == null ? 
source : scriptName; compiler.compile(loader, reserved, name, source, compilerSettings); return null; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java index 3e1c2ff2db153..a15f87966eae2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/Walker.java @@ -198,7 +198,7 @@ private Walker(ScriptClassInfo scriptClassInfo, MainMethodReserved reserved, Str this.reserved.push(reserved); this.debugStream = debugStream; this.settings = settings; - this.sourceName = Location.computeSourceName(sourceName, sourceText); + this.sourceName = Location.computeSourceName(sourceName); this.sourceText = sourceText; this.globals = new Globals(new BitSet(sourceText.length())); this.definition = definition; diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java index 69f6b1736a5ee..efb6db278140d 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSource.java @@ -249,7 +249,7 @@ public void write() { } visitor.visit(WriterConstants.CLASS_VERSION, classAccess, className, null, Type.getType(scriptClassInfo.getBaseClass()).getInternalName(), classInterfaces); - visitor.visitSource(Location.computeSourceName(name, source), null); + visitor.visitSource(Location.computeSourceName(name), null); // Write the a method to bootstrap def calls MethodWriter bootstrapDef = new MethodWriter(Opcodes.ACC_STATIC | Opcodes.ACC_VARARGS, DEF_BOOTSTRAP_METHOD, visitor, From 3bdfc8f3fb26d5c149c9a1a5f944a280ccbebe37 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Apr 2018 09:27:14 +0200 Subject: [PATCH 36/68] Upgrade to lucene-7.3.0-snapshot-98a6b3d. 
(#29298) Most notable changes include: - this release doesn't have the 7.2.1 version constant so I had to create one - spatial4j and jts were upgraded --- .../gradle/plugin/PluginBuildPlugin.groovy | 2 +- buildSrc/version.properties | 6 +- docs/Versions.asciidoc | 4 +- .../query-dsl/geo-shape-query.asciidoc | 10 +- .../common/TrimTokenFilterFactory.java | 8 +- .../lucene-expressions-7.2.1.jar.sha1 | 1 - ...xpressions-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-analyzers-icu-7.2.1.jar.sha1 | 1 - ...lyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-analyzers-kuromoji-7.2.1.jar.sha1 | 1 - ...s-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-analyzers-phonetic-7.2.1.jar.sha1 | 1 - ...s-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-analyzers-smartcn-7.2.1.jar.sha1 | 1 - ...rs-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-analyzers-stempel-7.2.1.jar.sha1 | 1 - ...rs-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + ...lucene-analyzers-morfologik-7.2.1.jar.sha1 | 1 - ...morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/build.gradle | 13 +- server/licenses/jts-1.13.jar.sha1 | 1 - server/licenses/jts-LICENSE.txt | 165 ------------------ server/licenses/jts-core-1.15.0.jar.sha1 | 1 + server/licenses/jts-core-LICENSE.txt | 31 ++++ .../{jts-NOTICE.txt => jts-core-NOTICE.txt} | 0 .../lucene-analyzers-common-7.2.1.jar.sha1 | 1 - ...ers-common-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-backward-codecs-7.2.1.jar.sha1 | 1 - ...ard-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-core-7.2.1.jar.sha1 | 1 - ...ucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../licenses/lucene-grouping-7.2.1.jar.sha1 | 1 - ...e-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-highlighter-7.2.1.jar.sha1 | 1 - ...ighlighter-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-join-7.2.1.jar.sha1 | 1 - ...ucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-memory-7.2.1.jar.sha1 | 1 - ...ene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-misc-7.2.1.jar.sha1 | 1 - ...ucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-queries-7.2.1.jar.sha1 | 1 - ...ne-queries-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-queryparser-7.2.1.jar.sha1 | 1 - ...ueryparser-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-sandbox-7.2.1.jar.sha1 | 1 - ...ne-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-spatial-7.2.1.jar.sha1 | 1 - ...ne-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../lucene-spatial-extras-7.2.1.jar.sha1 | 1 - ...ial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + .../licenses/lucene-spatial3d-7.2.1.jar.sha1 | 1 - ...-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/lucene-suggest-7.2.1.jar.sha1 | 1 - ...ne-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 | 1 + server/licenses/spatial4j-0.6.jar.sha1 | 1 - server/licenses/spatial4j-0.7.jar.sha1 | 1 + .../main/java/org/elasticsearch/Version.java | 16 +- .../common/geo/GeoShapeType.java | 2 +- .../common/geo/ShapesAvailability.java | 2 +- .../common/geo/builders/CircleBuilder.java | 2 +- .../geo/builders/CoordinatesBuilder.java | 2 +- .../common/geo/builders/EnvelopeBuilder.java | 2 +- .../geo/builders/LineStringBuilder.java | 8 +- .../geo/builders/MultiLineStringBuilder.java | 6 +- .../geo/builders/MultiPointBuilder.java | 2 +- .../geo/builders/MultiPolygonBuilder.java | 2 +- .../common/geo/builders/PointBuilder.java | 2 +- .../common/geo/builders/PolygonBuilder.java | 12 +- 
.../common/geo/builders/ShapeBuilder.java | 6 +- .../common/geo/parsers/CoordinateNode.java | 2 +- .../common/geo/parsers/GeoJsonParser.java | 2 +- .../common/geo/parsers/GeoWKTParser.java | 2 +- .../CustomPassageFormatterTests.java | 8 +- .../common/geo/BaseGeoParsingTestCase.java | 4 +- .../common/geo/GeoJsonShapeParserTests.java | 12 +- .../common/geo/GeoWKTShapeParserTests.java | 12 +- .../common/geo/ShapeBuilderTests.java | 6 +- .../geo/builders/CircleBuilderTests.java | 2 +- .../geo/builders/EnvelopeBuilderTests.java | 2 +- .../geo/builders/LineStringBuilderTests.java | 2 +- .../builders/MultiLineStringBuilderTests.java | 2 +- .../geo/builders/MultiPointBuilderTests.java | 2 +- .../geo/builders/PointBuilderTests.java | 2 +- .../geo/builders/PolygonBuilderTests.java | 2 +- .../query/GeoPolygonQueryBuilderTests.java | 2 +- .../query/GeoShapeQueryBuilderTests.java | 2 +- .../search/geo/GeoShapeQueryTests.java | 2 +- .../test/geo/RandomShapeGenerator.java | 6 +- .../hamcrest/ElasticsearchGeoAssertions.java | 14 +- .../analysis/AnalysisFactoryTestCase.java | 7 +- 91 files changed, 164 insertions(+), 282 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/jts-1.13.jar.sha1 delete mode 100644 server/licenses/jts-LICENSE.txt create mode 100644 server/licenses/jts-core-1.15.0.jar.sha1 create mode 100644 server/licenses/jts-core-LICENSE.txt rename server/licenses/{jts-NOTICE.txt => jts-core-NOTICE.txt} (100%) delete mode 100644 server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-core-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-7.2.1.jar.sha1 create mode 100644 
server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-join-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-memory-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-misc-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-queries-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-7.2.1.jar.sha1 create mode 100644 server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 delete mode 100644 server/licenses/spatial4j-0.6.jar.sha1 create mode 100644 server/licenses/spatial4j-0.7.jar.sha1 diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index f802a2895909e..80cb376077ed1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -95,7 +95,7 @@ public class PluginBuildPlugin extends BuildPlugin { // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps compileOnly "org.locationtech.spatial4j:spatial4j:${project.versions.spatial4j}" - compileOnly "com.vividsolutions:jts:${project.versions.jts}" + compileOnly "org.locationtech.jts:jts-core:${project.versions.jts}" compileOnly "org.apache.logging.log4j:log4j-api:${project.versions.log4j}" compileOnly "org.apache.logging.log4j:log4j-core:${project.versions.log4j}" compileOnly "org.elasticsearch:jna:${project.versions.jna}" diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fabcadabd9f96..e064b2f223cb6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,9 +1,9 @@ elasticsearch = 7.0.0-alpha1 -lucene = 7.2.1 +lucene = 7.3.0-snapshot-98a6b3d # optional dependencies -spatial4j = 0.6 -jts = 1.13 +spatial4j = 0.7 +jts = 1.15.0 jackson = 2.8.10 snakeyaml = 1.17 # when updating log4j, please update also docs/java-api/index.asciidoc diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 1c55e3b8a4e55..9f7fdc9ea2f17 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 7.0.0-alpha1 :major-version: 7.x -:lucene_version: 7.2.1 -:lucene_version_path: 7_2_1 +:lucene_version: 7.3.0 +:lucene_version_path: 7_3_0 :branch: master :jdk: 1.8.0_131 :jdk_major: 8 diff --git 
a/docs/java-api/query-dsl/geo-shape-query.asciidoc b/docs/java-api/query-dsl/geo-shape-query.asciidoc index c8084c5ea9fd6..803f1849b5cdf 100644 --- a/docs/java-api/query-dsl/geo-shape-query.asciidoc +++ b/docs/java-api/query-dsl/geo-shape-query.asciidoc @@ -12,13 +12,13 @@ to your classpath in order to use this type: org.locationtech.spatial4j spatial4j - 0.6 <1> + 0.7 <1> - com.vividsolutions - jts - 1.13 <2> + org.locationtech.jts + jts-core + 1.15.0 <2> xerces @@ -28,7 +28,7 @@ to your classpath in order to use this type: ----------------------------------------------- <1> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.spatial4j%22%20AND%20a%3A%22spatial4j%22[Maven Central] -<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22com.vividsolutions%22%20AND%20a%3A%22jts%22[Maven Central] +<2> check for updates in http://search.maven.org/#search%7Cga%7C1%7Cg%3A%22org.locationtech.jts%22%20AND%20a%3A%22jts-core%22[Maven Central] [source,java] -------------------------------------------------- diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java index ab82ba0f7eb42..1412a99f41f44 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/TrimTokenFilterFactory.java @@ -25,8 +25,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; +import org.elasticsearch.index.analysis.MultiTermAwareComponent; -public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { +public class TrimTokenFilterFactory extends AbstractTokenFilterFactory implements MultiTermAwareComponent { private static final String UPDATE_OFFSETS_KEY = "update_offsets"; @@ -41,4 +42,9 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public TokenStream create(TokenStream tokenStream) { return new TrimFilter(tokenStream); } + + @Override + public Object getMultiTermComponent() { + return this; + } } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 deleted file mode 100644 index a57efa8c26aa6..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51fbb33cdb17bb36a0e86485685bba18eb1c2ccf \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..a92cbe3045071 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +38ff5a1f4bcbfb6e1ffacd3263175c2a1ba23e9f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 deleted file mode 100644 index fb8e4b0167bf5..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cfdfcd54c052cdd08140c7cd4daa7929b9657da0 \ No newline at end of file diff --git 
a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..49aa857cf9429 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ece1b4232697fad170c589f0df887efa6e66dd4f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 deleted file mode 100644 index f8c67b9480380..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21418892a16434ecb4f8efdbf4e62838f58a6a59 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..16f43319ded3a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a16521e8f7240a9b93ea8ced157298b9d18bca43 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 deleted file mode 100644 index 2443de6a49b0a..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -970e860a6e252e7c1dc117c45176a847ce961ffc \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e86c0765b3868 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +0dc6db8e16bf1ed6ebaa914fcbfbb4970af23747 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 deleted file mode 100644 index 1c301d32445ec..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec08375a8392720cc378995d8234cd6138a735f6 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..b6f58cf3fe622 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +de43b057e8800f6c7b26907035664feb686127af \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 deleted file mode 100644 index 4833879967b8e..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -58305876f7fb0fbfad288910378cf4770da43892 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 
b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cac837ab4a6fc --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +c5e6a6d99a04ea5121bfd77470a7818725516ead \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 deleted file mode 100644 index dc33291c7a3cb..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -51cf40e2606863840e52d7e8981314a5a0323e06 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..909569fec9c95 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +d755dcef8763b783b7cbba7154a62f91e413007c \ No newline at end of file diff --git a/server/build.gradle b/server/build.gradle index 6042fb65ba021..ab74520106da9 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -99,7 +99,7 @@ dependencies { // lucene spatial compile "org.locationtech.spatial4j:spatial4j:${versions.spatial4j}", optional - compile "com.vividsolutions:jts:${versions.jts}", optional + compile "org.locationtech.jts:jts-core:${versions.jts}", optional // logging compile "org.apache.logging.log4j:log4j-api:${versions.log4j}" @@ -281,6 +281,17 @@ thirdPartyAudit.excludes = [ // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j) 'org.noggit.JSONParser', + + // from lucene-spatial + 'com.fasterxml.jackson.databind.JsonSerializer', + 'com.fasterxml.jackson.databind.JsonDeserializer', + 'com.fasterxml.jackson.databind.node.ArrayNode', + 'com.google.common.geometry.S2Cell', + 'com.google.common.geometry.S2CellId', + 'com.google.common.geometry.S2Projections', + 'com.google.common.geometry.S2Point', + 'com.google.common.geometry.S2$Metric', + 'com.google.common.geometry.S2LatLng', ] if (JavaVersion.current() > JavaVersion.VERSION_1_8) { diff --git a/server/licenses/jts-1.13.jar.sha1 b/server/licenses/jts-1.13.jar.sha1 deleted file mode 100644 index 5b9e3902cf493..0000000000000 --- a/server/licenses/jts-1.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ccfb9b60f04d71add996a666ceb8902904fd805 \ No newline at end of file diff --git a/server/licenses/jts-LICENSE.txt b/server/licenses/jts-LICENSE.txt deleted file mode 100644 index 65c5ca88a67c3..0000000000000 --- a/server/licenses/jts-LICENSE.txt +++ /dev/null @@ -1,165 +0,0 @@ - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - - This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. - - 0. Additional Definitions. - - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. 
- - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. - - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. - - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". - - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. - - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. - - 1. Exception to Section 3 of the GNU GPL. - - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. - - 2. Conveying Modified Versions. - - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: - - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or - - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. - - 3. Object Code Incorporating Material from Library Header Files. - - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: - - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the object code with a copy of the GNU GPL and this license - document. - - 4. Combined Works. - - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: - - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. - - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. - - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. 
- - d) Do one of the following: - - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. - - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. - - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) - - 5. Combined Libraries. - - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: - - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. - - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. - - 6. Revised Versions of the GNU Lesser General Public License. - - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. - - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. - - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. 
diff --git a/server/licenses/jts-core-1.15.0.jar.sha1 b/server/licenses/jts-core-1.15.0.jar.sha1 new file mode 100644 index 0000000000000..32e262511c0ef --- /dev/null +++ b/server/licenses/jts-core-1.15.0.jar.sha1 @@ -0,0 +1 @@ +705981b7e25d05a76a3654e597dab6ba423eb79e \ No newline at end of file diff --git a/server/licenses/jts-core-LICENSE.txt b/server/licenses/jts-core-LICENSE.txt new file mode 100644 index 0000000000000..bc03db03a5926 --- /dev/null +++ b/server/licenses/jts-core-LICENSE.txt @@ -0,0 +1,31 @@ +Eclipse Distribution License - v 1.0 + +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + Neither the name of the Eclipse Foundation, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/server/licenses/jts-NOTICE.txt b/server/licenses/jts-core-NOTICE.txt similarity index 100% rename from server/licenses/jts-NOTICE.txt rename to server/licenses/jts-core-NOTICE.txt diff --git a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 b/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 deleted file mode 100644 index 5ffdd6b7ba4cf..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -324c3a090a04136720f4ef612db03b5c14866efa \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..c167b717385d5 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +a731424734fd976b409f1963ba88471caccc18aa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 b/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 deleted file mode 100644 index b166b97dd7c4d..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bc8dc9cc1555543532953d1dff33b67f849e19f9 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..cdaec87d35b28 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5f8ad8c3f8c404803aa81a43ac6f732e19c00935 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.2.1.jar.sha1 b/server/licenses/lucene-core-7.2.1.jar.sha1 deleted file mode 100644 index e2fd2d7533737..0000000000000 --- a/server/licenses/lucene-core-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -91897dbbbbada95ccddbd90505f0a0ba6bf7c199 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ecb3bb28e238c --- /dev/null +++ b/server/licenses/lucene-core-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +19b1a1fff6bb077e0660e4f0666807e24dd26865 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.2.1.jar.sha1 b/server/licenses/lucene-grouping-7.2.1.jar.sha1 deleted file mode 100644 index 7537cd21bf326..0000000000000 --- a/server/licenses/lucene-grouping-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5dbae570b1a4e54cd978fe5c3ed2d6b2f87be968 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..03f9bf1a4c87e --- /dev/null +++ b/server/licenses/lucene-grouping-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +94dd26d685ae981905b775780e6c824f723b14af \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 b/server/licenses/lucene-highlighter-7.2.1.jar.sha1 deleted file mode 100644 index 38837afb0a623..0000000000000 --- a/server/licenses/lucene-highlighter-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2f4b8c93563409cfebb36d910c4dab4910678689 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..32327ca414ddb --- 
/dev/null +++ b/server/licenses/lucene-highlighter-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9783a0bb56fb8bbd17280d3def97a656999f6a88 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.2.1.jar.sha1 b/server/licenses/lucene-join-7.2.1.jar.sha1 deleted file mode 100644 index c2944aa323e2f..0000000000000 --- a/server/licenses/lucene-join-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3121a038d472f51087500dd6da9146a9b0031ae4 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6b521d7de7fe1 --- /dev/null +++ b/server/licenses/lucene-join-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +01eda74d798af85f846ebd74f53ec7a16e6e2ba1 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.2.1.jar.sha1 b/server/licenses/lucene-memory-7.2.1.jar.sha1 deleted file mode 100644 index 543e123b2a733..0000000000000 --- a/server/licenses/lucene-memory-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21233b2baeed2aaa5acf8359bf8c4a90cc6bf553 \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..6bfaf1c715f89 --- /dev/null +++ b/server/licenses/lucene-memory-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +29b8b6324722dc6dda784731e3e918de9715422c \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.2.1.jar.sha1 b/server/licenses/lucene-misc-7.2.1.jar.sha1 deleted file mode 100644 index 2a9f649d7d527..0000000000000 --- a/server/licenses/lucene-misc-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0478fed6c474c95f6c0c678c04297a3df0c1687e \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..74d01520b6479 --- /dev/null +++ b/server/licenses/lucene-misc-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +e1ae49522164a721d67459e59792db6f4dff70fc \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.2.1.jar.sha1 b/server/licenses/lucene-queries-7.2.1.jar.sha1 deleted file mode 100644 index e0f2d575e8a2a..0000000000000 --- a/server/licenses/lucene-queries-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02135cf5047409ed1ca6cd098e802b30f9dbd1ff \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..172a57bed49fe --- /dev/null +++ b/server/licenses/lucene-queries-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +87595367717ddc9fbf95bbf649216a5d7954d9d7 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 b/server/licenses/lucene-queryparser-7.2.1.jar.sha1 deleted file mode 100644 index 56c5dbfa18678..0000000000000 --- a/server/licenses/lucene-queryparser-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a87d8b14d1c8045f61cb704955706f6681170be3 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..ac6aec921a30c --- /dev/null +++ b/server/licenses/lucene-queryparser-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +5befbb58ef76c79fc8afebbca781b01320b8ffad \ No newline at end of file 
diff --git a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 b/server/licenses/lucene-sandbox-7.2.1.jar.sha1 deleted file mode 100644 index 9445acbdd87d8..0000000000000 --- a/server/licenses/lucene-sandbox-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc8dd132fd183791dc27591a69974f55b685d0d7 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..412b072e09d2e --- /dev/null +++ b/server/licenses/lucene-sandbox-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +3d7aa72ccec38ef902b149da36548fb227eeb58a \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-7.2.1.jar.sha1 deleted file mode 100644 index 8c1b3d01c2339..0000000000000 --- a/server/licenses/lucene-spatial-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09c4d96e6ea34292f7cd20c4ff1d16ff31eb7869 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..5c8d749cf978b --- /dev/null +++ b/server/licenses/lucene-spatial-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +ac1755a69f14c53f7846ef7d9b405d44caf53091 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 b/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 deleted file mode 100644 index 50422956651d3..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8aff7e8a5547c03d0c4e7e1b58cb30773bb1d7d5 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..09e57350f1cdd --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +9d2fa5db0ce9fb5a1b4e9f18d818b14e082ef5a0 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 b/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 deleted file mode 100644 index 85aae1cfdd053..0000000000000 --- a/server/licenses/lucene-spatial3d-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8b0db8ff795b31994ebe93779c450d17c612590d \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..e59ab0d054d0d --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +99aefdef8178e54f93b743452c5d36bf7e8b3a2d \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.2.1.jar.sha1 b/server/licenses/lucene-suggest-7.2.1.jar.sha1 deleted file mode 100644 index e46240d1c6287..0000000000000 --- a/server/licenses/lucene-suggest-7.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1c3804602e35589c21b0391fa7088ef012751a22 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 new file mode 100644 index 0000000000000..805298afb193e --- /dev/null +++ b/server/licenses/lucene-suggest-7.3.0-snapshot-98a6b3d.jar.sha1 @@ -0,0 +1 @@ +6257a8a1860ec5f57439c420637d5f20bab124ae \ No newline at end of file diff --git a/server/licenses/spatial4j-0.6.jar.sha1 b/server/licenses/spatial4j-0.6.jar.sha1 
deleted file mode 100644 index 740a25b1c9016..0000000000000 --- a/server/licenses/spatial4j-0.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -21b15310bddcfd8c72611c180f20cf23279809a3 \ No newline at end of file diff --git a/server/licenses/spatial4j-0.7.jar.sha1 b/server/licenses/spatial4j-0.7.jar.sha1 new file mode 100644 index 0000000000000..2244eb6800408 --- /dev/null +++ b/server/licenses/spatial4j-0.7.jar.sha1 @@ -0,0 +1 @@ +faa8ba85d503da4ab872d17ba8c00da0098ab2f2 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 93683259c8080..2652afce9b4a1 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -151,21 +151,23 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_1_3 = new Version(V_6_1_3_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); public static final int V_6_1_4_ID = 6010499; public static final Version V_6_1_4 = new Version(V_6_1_4_ID, org.apache.lucene.util.Version.LUCENE_7_1_0); + // The below version is missing from the 7.3 JAR + private static final org.apache.lucene.util.Version LUCENE_7_2_1 = org.apache.lucene.util.Version.fromBits(7, 2, 1); public static final int V_6_2_0_ID = 6020099; - public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_2_1); public static final int V_6_2_1_ID = 6020199; - public static final Version V_6_2_1 = new Version(V_6_2_1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_1 = new Version(V_6_2_1_ID, LUCENE_7_2_1); public static final int V_6_2_2_ID = 6020299; - public static final Version V_6_2_2 = new Version(V_6_2_2_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_2 = new Version(V_6_2_2_ID, LUCENE_7_2_1); public static final int V_6_2_3_ID = 6020399; - public static final Version V_6_2_3 = new Version(V_6_2_3_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_3 = new Version(V_6_2_3_ID, LUCENE_7_2_1); public static final int V_6_2_4_ID = 6020499; - public static final Version V_6_2_4 = new Version(V_6_2_4_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, LUCENE_7_2_1); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = - new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_1); + new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static final Version CURRENT = V_7_0_0_alpha1; static { diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java index 9eb1fa9a3f4ab..ee480ffad7092 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoShapeType.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import 
org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.CircleBuilder; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java index c800e01159432..63c71adb1dc58 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java +++ b/server/src/main/java/org/elasticsearch/common/geo/ShapesAvailability.java @@ -36,7 +36,7 @@ public class ShapesAvailability { boolean xJTS_AVAILABLE; try { - Class.forName("com.vividsolutions.jts.geom.GeometryFactory"); + Class.forName("org.locationtech.jts.geom.GeometryFactory"); xJTS_AVAILABLE = true; } catch (ClassNotFoundException ignored) { xJTS_AVAILABLE = false; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java index 024ec91e88765..9c58877653e16 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CircleBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Circle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java index 2eaf5f26dc78b..fdf2295c5f8eb 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/CoordinatesBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchException; import java.util.ArrayList; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 34da7e7fc2f6c..a878a7c6d8618 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Rectangle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index a888ee0867cb2..035c4566a5763 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import 
com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index 13f9968864c32..68da45bbf0c68 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -22,9 +22,9 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.elasticsearch.common.geo.parsers.ShapeParser; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 03d7683c8e113..be356d4ac2f11 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.XShapeCollection; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index 168d57c1764a7..3d917bcff6e48 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.geo.parsers.GeoWKTParser; import org.locationtech.spatial4j.shape.Shape; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index 0380e0be07392..e4e763d9b3a99 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.locationtech.spatial4j.shape.Point; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index dade127456c8c..3b98f5b98e439 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.parsers.ShapeParser; diff --git a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index cd0ecdc4aeb88..fbb2fd19f0e6d 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/server/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; import org.apache.logging.log4j.Logger; import org.elasticsearch.Assertions; diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java index 98f8f57d39734..d150647a781e4 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/CoordinateNode.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java index 31107d763913e..49b7d68b583ff 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoJsonParser.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; diff --git 
a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java index 74e463c723a5a..20b159222d251 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java +++ b/server/src/main/java/org/elasticsearch/common/geo/parsers/GeoWKTParser.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.common.geo.parsers; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoShapeType; diff --git a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java index 0b8bccb784f24..5ea32f98a88a5 100644 --- a/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java +++ b/server/src/test/java/org/apache/lucene/search/uhighlight/CustomPassageFormatterTests.java @@ -43,7 +43,7 @@ public void testSimpleFormat() { int end = start + match.length(); passage1.setStartOffset(0); passage1.setEndOffset(end + 2); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -51,7 +51,7 @@ public void testSimpleFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(end + 26); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Passage passage3 = new Passage(); @@ -84,7 +84,7 @@ public void testHtmlEncodeFormat() { int end = start + match.length(); passage1.setStartOffset(0); passage1.setEndOffset(end + 6); //lets include the whitespace at the end to make sure we trim it - passage1.addMatch(start, end, matchBytesRef); + passage1.addMatch(start, end, matchBytesRef, 1); passages[0] = passage1; Passage passage2 = new Passage(); @@ -92,7 +92,7 @@ public void testHtmlEncodeFormat() { end = start + match.length(); passage2.setStartOffset(passage1.getEndOffset()); passage2.setEndOffset(content.length()); - passage2.addMatch(start, end, matchBytesRef); + passage2.addMatch(start, end, matchBytesRef, 1); passages[1] = passage2; Snippet[] fragments = passageFormatter.format(passages, content); diff --git a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java index fff415de5550e..f7771f0f84466 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java +++ b/server/src/test/java/org/elasticsearch/common/geo/BaseGeoParsingTestCase.java @@ -18,8 +18,8 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.GeometryFactory; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.GeometryFactory; import org.elasticsearch.common.geo.parsers.ShapeParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java index 0a0b9d6583bbb..6f9128454f374 100644 --- 
a/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoJsonShapeParserTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java index 0a113549d1664..3189a4fcdb091 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoWKTShapeParserTests.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.LinearRing; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.Point; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.LinearRing; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.Point; +import org.locationtech.jts.geom.Polygon; import org.apache.lucene.geo.GeoTestUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; diff --git a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java index 22877b8ff3b3c..78c3963bd0429 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/ShapeBuilderTests.java @@ -19,9 +19,9 @@ package org.elasticsearch.common.geo; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import org.elasticsearch.common.geo.builders.CircleBuilder; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 348ac049f28d8..b3892d9d551f5 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.unit.DistanceUnit; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java 
b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java index b5fe3222b7385..cfd9d76fddb82 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/EnvelopeBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.locationtech.spatial4j.shape.Rectangle; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java index 3b5f2662316ca..b0b11afa97c62 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java index b650939594077..1f6565eecca60 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java index c0a799e1c306e..cd29a416b0904 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java index bf2a7da910b4d..9197ca3d61116 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java 
index 8501760d1e772..7f8b893caf085 100644 --- a/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.geo.builders; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation; import org.elasticsearch.test.geo.RandomShapeGenerator; import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java index 7b8c1177ec8ac..b5fb281454010 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoPolygonQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.common.ParsingException; diff --git a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java index 99713c140c9e0..3282077ba6a77 100644 --- a/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.query; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java index c877cb3be180c..d3a31f12c57db 100644 --- a/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.locationtech.spatial4j.shape.Rectangle; -import com.vividsolutions.jts.geom.Coordinate; +import org.locationtech.jts.geom.Coordinate; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; diff --git a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java index 4a473893e9047..7fbfa0670f9c9 100644 --- a/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java +++ b/server/src/test/java/org/elasticsearch/test/geo/RandomShapeGenerator.java @@ -20,9 +20,9 @@ package org.elasticsearch.test.geo; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import com.vividsolutions.jts.algorithm.ConvexHull; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; +import org.locationtech.jts.algorithm.ConvexHull; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.geo.builders.CoordinatesBuilder; import 
org.elasticsearch.common.geo.builders.GeometryCollectionBuilder; diff --git a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java index 4f5a7d8ac1faa..7213d7bf9802f 100644 --- a/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java +++ b/server/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchGeoAssertions.java @@ -26,13 +26,13 @@ import org.locationtech.spatial4j.shape.impl.RectangleImpl; import org.locationtech.spatial4j.shape.jts.JtsGeometry; import org.locationtech.spatial4j.shape.jts.JtsPoint; -import com.vividsolutions.jts.geom.Coordinate; -import com.vividsolutions.jts.geom.Geometry; -import com.vividsolutions.jts.geom.LineString; -import com.vividsolutions.jts.geom.MultiLineString; -import com.vividsolutions.jts.geom.MultiPoint; -import com.vividsolutions.jts.geom.MultiPolygon; -import com.vividsolutions.jts.geom.Polygon; +import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; +import org.locationtech.jts.geom.LineString; +import org.locationtech.jts.geom.MultiLineString; +import org.locationtech.jts.geom.MultiPoint; +import org.locationtech.jts.geom.MultiPolygon; +import org.locationtech.jts.geom.Polygon; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.unit.DistanceUnit; diff --git a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java index 29d58ae25777f..232ad14aabc55 100644 --- a/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/indices/analysis/AnalysisFactoryTestCase.java @@ -216,6 +216,8 @@ private static String toCamelCase(String s) { .put("tokenoffsetpayload", Void.class) // puts the type into the payload .put("typeaspayload", Void.class) + // puts the type as a synonym + .put("typeassynonym", Void.class) // fingerprint .put("fingerprint", Void.class) // for tee-sinks @@ -463,11 +465,6 @@ public void testPreBuiltMultiTermAware() { Set classesThatShouldNotHaveMultiTermSupport = new HashSet<>(actual); classesThatShouldNotHaveMultiTermSupport.removeAll(expected); - classesThatShouldNotHaveMultiTermSupport.remove("token filter [trim]"); - if (Version.CURRENT.luceneVersion.onOrAfter(org.apache.lucene.util.Version.fromBits(7, 3, 0))) { - // TODO: remove the above exclusion when we move to lucene 7.3 - assert false; - } assertTrue("Pre-built components should not have multi-term support: " + classesThatShouldNotHaveMultiTermSupport, classesThatShouldNotHaveMultiTermSupport.isEmpty()); } From f8602b1c7ef143ec1054f658bdf4e5ab1cca6dc5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Apr 2018 09:55:04 +0200 Subject: [PATCH 37/68] Fix Eclipse build. 
Relates #29300
---
 libs/x-content/src/main/eclipse-build.gradle | 3 +++
 libs/x-content/src/test/eclipse-build.gradle | 7 +++++++
 settings.gradle                              | 5 +++++
 3 files changed, 15 insertions(+)
 create mode 100644 libs/x-content/src/main/eclipse-build.gradle
 create mode 100644 libs/x-content/src/test/eclipse-build.gradle

diff --git a/libs/x-content/src/main/eclipse-build.gradle b/libs/x-content/src/main/eclipse-build.gradle
new file mode 100644
index 0000000000000..a17f089781183
--- /dev/null
+++ b/libs/x-content/src/main/eclipse-build.gradle
@@ -0,0 +1,3 @@
+
+// this is just a shell gradle file for eclipse to have separate projects for x-content src and tests
+apply from: '../../build.gradle'
diff --git a/libs/x-content/src/test/eclipse-build.gradle b/libs/x-content/src/test/eclipse-build.gradle
new file mode 100644
index 0000000000000..f456f71a4c310
--- /dev/null
+++ b/libs/x-content/src/test/eclipse-build.gradle
@@ -0,0 +1,7 @@
+
+// this is just a shell gradle file for eclipse to have separate projects for x-content src and tests
+apply from: '../../build.gradle'
+
+dependencies {
+  testCompile project(':libs:x-content')
+}
diff --git a/settings.gradle b/settings.gradle
index 420b4104d621d..76b157d0e4a3b 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -80,6 +80,7 @@ if (isEclipse) {
   projects << 'server-tests'
   projects << 'libs:elasticsearch-core-tests'
   projects << 'libs:elasticsearch-nio-tests'
+  projects << 'libs:x-content-tests'
   projects << 'libs:secure-sm-tests'
   projects << 'libs:grok-tests'
 }
@@ -101,6 +102,10 @@ if (isEclipse) {
   project(":libs:elasticsearch-nio").buildFileName = 'eclipse-build.gradle'
   project(":libs:elasticsearch-nio-tests").projectDir = new File(rootProject.projectDir, 'libs/elasticsearch-nio/src/test')
   project(":libs:elasticsearch-nio-tests").buildFileName = 'eclipse-build.gradle'
+  project(":libs:x-content").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/main')
+  project(":libs:x-content").buildFileName = 'eclipse-build.gradle'
+  project(":libs:x-content-tests").projectDir = new File(rootProject.projectDir, 'libs/x-content/src/test')
+  project(":libs:x-content-tests").buildFileName = 'eclipse-build.gradle'
   project(":libs:secure-sm").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/main')
   project(":libs:secure-sm").buildFileName = 'eclipse-build.gradle'
   project(":libs:secure-sm-tests").projectDir = new File(rootProject.projectDir, 'libs/secure-sm/src/test')

From 0028563aace400e1af380cbf44d30bfb6aee4171 Mon Sep 17 00:00:00 2001
From: rationull
Date: Tue, 3 Apr 2018 01:57:49 -0700
Subject: [PATCH 38/68] Pass through script params in scripted metric agg
 (#29154)

* Pass script level params into scripted metric aggs (#28819)

Now params that are passed at the script level and at the aggregation level
are merged and can both be used in the aggregation scripts. If there are any
conflicts, aggregation-level params will win. This may be followed by another
change detecting that case and throwing an exception to disallow such
conflicts.

* Disallow duplicate parameter names between scripted agg and script (#28819)

If a scripted metric aggregation has aggregation params and script params
which have the same name, throw an IllegalArgumentException when merging
the parameter lists.
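A minimal usage sketch of the merging rule, reusing the fixtures exercised by the
integration tests in this patch (the `idx` index and the mock `CustomScriptPlugin`);
it illustrates the behavior and assumes no API beyond what the diff below shows:

    // Script-level and aggregation-level params are merged before the scripts run,
    // so the map script can read both "param1" (script) and "param2" (aggregation).
    Map<String, Object> scriptParams = Collections.singletonMap("param1", "12");
    Map<String, Object> aggParams = Collections.singletonMap("param2", 1);
    Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME,
        "_agg[param1] = param2", scriptParams);
    SearchResponse response = client().prepareSearch("idx")
        .setQuery(matchAllQuery())
        .addAggregation(scriptedMetric("scripted").params(aggParams).mapScript(mapScript))
        .get();
    // Had aggParams also defined "param1", the merge would fail with:
    // IllegalArgumentException: Parameter name "param1" used in both aggregation and script parameters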
--- .../ScriptedMetricAggregationBuilder.java | 19 ++++- .../ScriptedMetricAggregatorFactory.java | 50 +++++++++---- .../metrics/ScriptedMetricIT.java | 31 +++++--- .../ScriptedMetricAggregatorTests.java | 72 ++++++++++++++++++- 4 files changed, 146 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java index 69ac175c419c8..c11c68f9b2524 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregationBuilder.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.Map; import java.util.Objects; @@ -198,20 +199,34 @@ protected ScriptedMetricAggregatorFactory doBuild(SearchContext context, Aggrega Builder subfactoriesBuilder) throws IOException { QueryShardContext queryShardContext = context.getQueryShardContext(); + + // Extract params from scripts and pass them along to ScriptedMetricAggregatorFactory, since it won't have + // access to them for the scripts it's given precompiled. + ExecutableScript.Factory executableInitScript; + Map initScriptParams; if (initScript != null) { executableInitScript = queryShardContext.getScriptService().compile(initScript, ExecutableScript.AGGS_CONTEXT); + initScriptParams = initScript.getParams(); } else { executableInitScript = p -> null; + initScriptParams = Collections.emptyMap(); } + SearchScript.Factory searchMapScript = queryShardContext.getScriptService().compile(mapScript, SearchScript.AGGS_CONTEXT); + Map mapScriptParams = mapScript.getParams(); + ExecutableScript.Factory executableCombineScript; + Map combineScriptParams; if (combineScript != null) { - executableCombineScript =queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + executableCombineScript = queryShardContext.getScriptService().compile(combineScript, ExecutableScript.AGGS_CONTEXT); + combineScriptParams = combineScript.getParams(); } else { executableCombineScript = p -> null; + combineScriptParams = Collections.emptyMap(); } - return new ScriptedMetricAggregatorFactory(name, searchMapScript, executableInitScript, executableCombineScript, reduceScript, + return new ScriptedMetricAggregatorFactory(name, searchMapScript, mapScriptParams, executableInitScript, initScriptParams, + executableCombineScript, combineScriptParams, reduceScript, params, queryShardContext.lookup(), context, parent, subfactoriesBuilder, metaData); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java index aa7de3e1ab6e1..0bc6a614e541f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorFactory.java @@ -35,28 +35,35 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; public class ScriptedMetricAggregatorFactory extends AggregatorFactory { private final SearchScript.Factory 
mapScript; + private final Map mapScriptParams; private final ExecutableScript.Factory combineScript; + private final Map combineScriptParams; private final Script reduceScript; - private final Map params; + private final Map aggParams; private final SearchLookup lookup; private final ExecutableScript.Factory initScript; + private final Map initScriptParams; - public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, ExecutableScript.Factory initScript, - ExecutableScript.Factory combineScript, Script reduceScript, Map params, + public ScriptedMetricAggregatorFactory(String name, SearchScript.Factory mapScript, Map mapScriptParams, + ExecutableScript.Factory initScript, Map initScriptParams, + ExecutableScript.Factory combineScript, Map combineScriptParams, + Script reduceScript, Map aggParams, SearchLookup lookup, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { super(name, context, parent, subFactories, metaData); this.mapScript = mapScript; + this.mapScriptParams = mapScriptParams; this.initScript = initScript; + this.initScriptParams = initScriptParams; this.combineScript = combineScript; + this.combineScriptParams = combineScriptParams; this.reduceScript = reduceScript; this.lookup = lookup; - this.params = params; + this.aggParams = aggParams; } @Override @@ -65,26 +72,26 @@ public Aggregator createInternal(Aggregator parent, boolean collectsFromSingleBu if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } - Map params = this.params; - if (params != null) { - params = deepCopyParams(params, context); + Map aggParams = this.aggParams; + if (aggParams != null) { + aggParams = deepCopyParams(aggParams, context); } else { - params = new HashMap<>(); + aggParams = new HashMap<>(); } - if (params.containsKey("_agg") == false) { - params.put("_agg", new HashMap()); + if (aggParams.containsKey("_agg") == false) { + aggParams.put("_agg", new HashMap()); } - final ExecutableScript initScript = this.initScript.newInstance(params); - final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(params, lookup); - final ExecutableScript combineScript = this.combineScript.newInstance(params); + final ExecutableScript initScript = this.initScript.newInstance(mergeParams(aggParams, initScriptParams)); + final SearchScript.LeafFactory mapScript = this.mapScript.newFactory(mergeParams(aggParams, mapScriptParams), lookup); + final ExecutableScript combineScript = this.combineScript.newInstance(mergeParams(aggParams, combineScriptParams)); final Script reduceScript = deepCopyScript(this.reduceScript, context); if (initScript != null) { initScript.run(); } return new ScriptedMetricAggregator(name, mapScript, - combineScript, reduceScript, params, context, parent, + combineScript, reduceScript, aggParams, context, parent, pipelineAggregators, metaData); } @@ -128,5 +135,18 @@ private static T deepCopyParams(T original, SearchContext context) { return clone; } + private static Map mergeParams(Map agg, Map script) { + // Start with script params + Map combined = new HashMap<>(script); + // Add in agg params, throwing an exception if any conflicts are detected + for (Map.Entry aggEntry : agg.entrySet()) { + if (combined.putIfAbsent(aggEntry.getKey(), aggEntry.getValue()) != null) { + throw new IllegalArgumentException("Parameter name \"" + aggEntry.getKey() + + "\" used in both aggregation and script parameters"); + } + } + + return combined; + } } diff 
--git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java index 24d94d5a4643c..9db5b237a858c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -62,6 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -322,11 +325,11 @@ public void testMap() { assertThat(numShardsRun, greaterThan(0)); } - public void testMapWithParams() { + public void testExplicitAggParam() { Map params = new HashMap<>(); params.put("_agg", new ArrayList<>()); - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", Collections.emptyMap()); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) @@ -361,17 +364,17 @@ public void testMapWithParams() { } public void testMapWithParamsAndImplicitAggMap() { - Map params = new HashMap<>(); - // don't put any _agg map in params - params.put("param1", "12"); - params.put("param2", 1); + // Split the params up between the script and the aggregation. + // Don't put any _agg map in params. 
+ Map scriptParams = Collections.singletonMap("param1", "12"); + Map aggregationParams = Collections.singletonMap("param2", 1); // The _agg hashmap will be available even if not declared in the params map - Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", params); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg[param1] = param2", scriptParams); SearchResponse response = client().prepareSearch("idx") .setQuery(matchAllQuery()) - .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)) + .addAggregation(scriptedMetric("scripted").params(aggregationParams).mapScript(mapScript)) .get(); assertSearchResponse(response); assertThat(response.getHits().getTotalHits(), equalTo(numDocs)); @@ -1001,4 +1004,16 @@ public void testDontCacheScripts() throws Exception { assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() .getMissCount(), equalTo(0L)); } + + public void testConflictingAggAndScriptParams() { + Map params = Collections.singletonMap("param1", "12"); + Script mapScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "_agg.add(1)", params); + + SearchRequestBuilder builder = client().prepareSearch("idx") + .setQuery(matchAllQuery()) + .addAggregation(scriptedMetric("scripted").params(params).mapScript(mapScript)); + + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get); + assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters")); + } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java index db2feafe6c4a3..0989b1ce6a3fa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregatorTests.java @@ -64,8 +64,16 @@ public class ScriptedMetricAggregatorTests extends AggregatorTestCase { Collections.emptyMap()); private static final Script COMBINE_SCRIPT_SCORE = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptScore", Collections.emptyMap()); - private static final Map, Object>> SCRIPTS = new HashMap<>(); + private static final Script INIT_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "initScriptParams", + Collections.singletonMap("initialValue", 24)); + private static final Script MAP_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "mapScriptParams", + Collections.singletonMap("itemValue", 12)); + private static final Script COMBINE_SCRIPT_PARAMS = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "combineScriptParams", + Collections.singletonMap("divisor", 4)); + private static final String CONFLICTING_PARAM_NAME = "initialValue"; + + private static final Map, Object>> SCRIPTS = new HashMap<>(); @BeforeClass @SuppressWarnings("unchecked") @@ -99,6 +107,26 @@ public static void initMockScripts() { Map agg = (Map) params.get("_agg"); return ((List) agg.get("collector")).stream().mapToDouble(Double::doubleValue).sum(); }); + + SCRIPTS.put("initScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer initialValue = (Integer)params.get("initialValue"); + ArrayList 
collector = new ArrayList(); + collector.add(initialValue); + agg.put("collector", collector); + return agg; + }); + SCRIPTS.put("mapScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + Integer itemValue = (Integer) params.get("itemValue"); + ((List) agg.get("collector")).add(itemValue); + return agg; + }); + SCRIPTS.put("combineScriptParams", params -> { + Map agg = (Map) params.get("_agg"); + int divisor = ((Integer) params.get("divisor")); + return ((List) agg.get("collector")).stream().mapToInt(Integer::intValue).map(i -> i / divisor).sum(); + }); } @SuppressWarnings("unchecked") @@ -187,6 +215,48 @@ public void testScriptedMetricWithCombineAccessesScores() throws IOException { } } + public void testScriptParamsPassedThrough() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + aggregationBuilder.initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS).combineScript(COMBINE_SCRIPT_PARAMS); + ScriptedMetric scriptedMetric = search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder); + + // The result value depends on the script params. + assertEquals(306, scriptedMetric.aggregation()); + } + } + } + + public void testConflictingAggAndScriptParams() throws IOException { + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + for (int i = 0; i < 100; i++) { + indexWriter.addDocument(singleton(new SortedNumericDocValuesField("number", i))); + } + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + ScriptedMetricAggregationBuilder aggregationBuilder = new ScriptedMetricAggregationBuilder(AGG_NAME); + Map aggParams = Collections.singletonMap(CONFLICTING_PARAM_NAME, "blah"); + aggregationBuilder.params(aggParams).initScript(INIT_SCRIPT_PARAMS).mapScript(MAP_SCRIPT_PARAMS). + combineScript(COMBINE_SCRIPT_PARAMS); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> + search(newSearcher(indexReader, true, true), new MatchAllDocsQuery(), aggregationBuilder) + ); + assertEquals("Parameter name \"" + CONFLICTING_PARAM_NAME + "\" used in both aggregation and script parameters", + ex.getMessage()); + } + } + } + /** * We cannot use Mockito for mocking QueryShardContext in this case because * script-related methods (e.g. QueryShardContext#getLazyExecutableScript) From 2b07f63bd58c08420f8c8ff02c53acc0b8eec98a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Apr 2018 11:15:44 +0200 Subject: [PATCH 39/68] Fix NDCG for empty search results (#29267) Fixes an edge case where DiscountedCumulativeGain can return NaN as the result of the quality metric calculation. This can happen when the search result set is empty and normalization is used. We should return 0 in this case. It also adds related unit tests for the other two metrics.
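To make the edge case concrete, here is a small self-contained sketch (plain Java, not the plugin code; the DCG formula follows the standard definition this metric uses) showing how normalization degenerates on an empty result set and how the guarded division in the diff below avoids it:

    import java.util.Collections;
    import java.util.List;

    public class NdcgEmptyResultsSketch {
        // Standard DCG: sum over 0-based positions i of (2^rating - 1) / log2(i + 2).
        static double dcg(List<Integer> ratings) {
            double sum = 0d;
            for (int i = 0; i < ratings.size(); i++) {
                sum += (Math.pow(2, ratings.get(i)) - 1) / (Math.log(i + 2) / Math.log(2));
            }
            return sum;
        }

        public static void main(String[] args) {
            List<Integer> empty = Collections.emptyList(); // no search hits
            double dcg = dcg(empty);
            double idcg = dcg(empty); // the ideal ranking over the hits is also empty
            System.out.println(dcg / idcg);                 // NaN: the bug (0.0 / 0.0)
            System.out.println(idcg > 0 ? dcg / idcg : 0d); // 0.0: the guarded division
        }
    }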
--- .../rankeval/DiscountedCumulativeGain.java | 9 ++++--- .../index/rankeval/MeanReciprocalRank.java | 4 +++ .../DiscountedCumulativeGainTests.java | 26 +++++++++++++++++++ .../rankeval/MeanReciprocalRankTests.java | 7 +++++ .../index/rankeval/PrecisionAtKTests.java | 8 ++++++ 5 files changed, 51 insertions(+), 3 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java index edb69fcb93523..3019532779800 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGain.java @@ -140,9 +140,12 @@ public EvalQueryQuality evaluate(String taskId, SearchHit[] hits, if (normalize) { Collections.sort(allRatings, Comparator.nullsLast(Collections.reverseOrder())); - double idcg = computeDCG( - allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); - dcg = dcg / idcg; + double idcg = computeDCG(allRatings.subList(0, Math.min(ratingsInSearchHits.size(), allRatings.size()))); + if (idcg > 0) { + dcg = dcg / idcg; + } else { + dcg = 0; + } } EvalQueryQuality evalQueryQuality = new EvalQueryQuality(taskId, dcg); evalQueryQuality.addHitsAndRatings(ratedHits); diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java index ef510b399d409..0f51f6d5d6369 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/MeanReciprocalRank.java @@ -228,6 +228,10 @@ public String getWriteableName() { return NAME; } + /** + * the ranking of the first relevant document, or -1 if no relevant document was + * found + */ int getFirstRelevantRank() { return firstRelevantRank; } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java index ea14e51512b24..22c3542c0fab4 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/DiscountedCumulativeGainTests.java @@ -205,6 +205,32 @@ public void testDCGAtFourMoreRatings() { assertEquals(12.392789260714371 / 13.347184833073591, dcg.evaluate("id", hits, ratedDocs).getQualityLevel(), DELTA); } + /** + * test that metric returns 0.0 when there are no search results + */ + public void testNoResults() throws Exception { + Integer[] relevanceRatings = new Integer[] { 3, 2, 3, null, 1, null }; + List ratedDocs = new ArrayList<>(); + for (int i = 0; i < 6; i++) { + if (i < relevanceRatings.length) { + if (relevanceRatings[i] != null) { + ratedDocs.add(new RatedDocument("index", Integer.toString(i), relevanceRatings[i])); + } + } + } + SearchHit[] hits = new SearchHit[0]; + DiscountedCumulativeGain dcg = new DiscountedCumulativeGain(); + EvalQueryQuality result = dcg.evaluate("id", hits, ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + + // also check normalized + dcg = new DiscountedCumulativeGain(true, null, 10); + result = dcg.evaluate("id", hits, 
ratedDocs); + assertEquals(0.0d, result.getQualityLevel(), DELTA); + assertEquals(0, filterUnknownDocuments(result.getHitsAndRatings()).size()); + } + public void testParseFromXContent() throws IOException { assertParsedCorrect("{ \"unknown_doc_rating\": 2, \"normalize\": true, \"k\" : 15 }", 2, true, 15); assertParsedCorrect("{ \"normalize\": false, \"k\" : 15 }", null, false, 15); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java index 8ab4f146ff724..6604dbc74a065 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/MeanReciprocalRankTests.java @@ -158,6 +158,13 @@ public void testEvaluationNoRelevantInResults() { assertEquals(0.0, evaluation.getQualityLevel(), Double.MIN_VALUE); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new MeanReciprocalRank()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(-1, ((MeanReciprocalRank.Breakdown) evaluated.getMetricDetails()).getFirstRelevantRank()); + } + public void testXContentRoundtrip() throws IOException { MeanReciprocalRank testItem = createTestItem(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java index a6d18c3457fa1..aa3dd5a0b7e32 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/PrecisionAtKTests.java @@ -142,6 +142,14 @@ public void testNoRatedDocs() throws Exception { assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); } + public void testNoResults() throws Exception { + SearchHit[] hits = new SearchHit[0]; + EvalQueryQuality evaluated = (new PrecisionAtK()).evaluate("id", hits, Collections.emptyList()); + assertEquals(0.0d, evaluated.getQualityLevel(), 0.00001); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRelevantRetrieved()); + assertEquals(0, ((PrecisionAtK.Breakdown) evaluated.getMetricDetails()).getRetrieved()); + } + public void testParseFromXContent() throws IOException { String xContent = " {\n" + " \"relevant_rating_threshold\" : 2" + "}"; try (XContentParser parser = createParser(JsonXContent.jsonXContent, xContent)) { From 989e46596418be21042dda42e5a41ed39e9666f4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 3 Apr 2018 11:30:43 +0200 Subject: [PATCH 40/68] Use fixture to test repository-s3 plugin (#29296) This commit adds a new fixture that emulates an S3 service in order to improve the existing integration tests. This is very similar to what was done for Google Cloud Storage in #28788, and such tests would have helped a lot to catch bugs like #22534. The AmazonS3Fixture is brittle and implements only what is strictly necessary for the S3 repository to work, but at least it works and can be adapted to specific test needs.
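As a rough sketch of the fixture pattern used here (generic JDK HttpServer code with assumed names, not the actual fixture below; args[0] is assumed to be the working directory): the fixture binds to an ephemeral loopback port, publishes the bound address in a `ports` file so the build can inject it as the client endpoint, and answers requests in the service's wire format:

    import com.sun.net.httpserver.HttpServer;
    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Collections;

    public class MiniHttpFixture {
        public static void main(String[] args) throws Exception {
            // Port 0 lets the OS pick a free ephemeral port on loopback.
            HttpServer server = HttpServer.create(
                new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
            server.createContext("/", exchange -> {
                // A stub response in the emulated service's wire format (XML for S3).
                byte[] body = "<ListBucketResult/>".getBytes(StandardCharsets.UTF_8);
                exchange.getResponseHeaders().add("Content-Type", "application/xml");
                exchange.sendResponseHeaders(200, body.length);
                exchange.getResponseBody().write(body);
                exchange.close();
            });
            server.start();
            // Publish "host:port" so the build can wire it into the client settings.
            InetSocketAddress addr = server.getAddress();
            Files.write(Paths.get(args[0]).resolve("ports"),
                Collections.singleton(addr.getHostString() + ":" + addr.getPort()));
        }
    }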
--- plugins/repository-gcs/build.gradle | 6 + .../gcs/GoogleCloudStorageFixture.java | 11 +- .../test/repository_gcs/10_basic.yml | 17 +- plugins/repository-s3/build.gradle | 25 +- .../repositories/s3/AmazonS3Fixture.java | 137 +++++ .../repositories/s3/AmazonS3TestServer.java | 542 ++++++++++++++++++ .../test/repository_s3/10_basic.yml | 184 +++++- .../test/repository_s3/20_repository.yml | 24 - 8 files changed, 906 insertions(+), 40 deletions(-) create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java delete mode 100644 plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 2ed37be68f9b8..bf2768a4312d8 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -58,6 +58,12 @@ thirdPartyAudit.excludes = [ 'org.apache.log.Logger', ] +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + /** A task to start the GoogleCloudStorageFixture which emulates a Google Cloud Storage service **/ task googleCloudStorageFixture(type: AntFixture) { dependsOn compileTestJava diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java index cddcab870de34..35606d724cc4c 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageFixture.java @@ -52,17 +52,16 @@ */ public class GoogleCloudStorageFixture { - @SuppressForbidden(reason = "PathUtils#get is fine - we don't have environment here") public static void main(String[] args) throws Exception { if (args == null || args.length != 2) { throw new IllegalArgumentException("GoogleCloudStorageFixture "); } - final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 43635); + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); try { - final Path workingDirectory = Paths.get(args[0]); + final Path workingDirectory = workingDir(args[0]); /// Writes the PID of the current Java process in a `pid` file located in the working directory writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); @@ -86,6 +85,11 @@ public static void main(String[] args) throws Exception { } } + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path workingDir(final String dir) { + return Paths.get(dir); + } + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { final Path tempPidFile = Files.createTempFile(dir, null, null); Files.write(tempPidFile, singleton(content)); @@ -101,7 +105,6 @@ private static String addressToString(final SocketAddress address) { } } - @SuppressForbidden(reason = "Use a http server") static class ResponseHandler implements HttpHandler { private final 
GoogleCloudStorageTestServer storageServer; diff --git a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml index 62387227cbc9d..4f63e4b4e458f 100644 --- a/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml +++ b/plugins/repository-gcs/src/test/resources/rest-api-spec/test/repository_gcs/10_basic.yml @@ -13,9 +13,6 @@ - match: { nodes.$master.plugins.0.name: repository-gcs } --- "Snapshot/Restore with repository-gcs": - - skip: - version: " - 6.3.0" - reason: repository-gcs was not testable through YAML tests until 6.3.0 # Register repository - do: @@ -28,7 +25,15 @@ client: "integration_test" - match: { acknowledged: true } - + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: {repository.settings.bucket : "bucket_test"} + - match: {repository.settings.client : "integration_test"} + # Index documents - do: bulk: @@ -180,7 +185,3 @@ - do: snapshot.delete_repository: repository: repository - - - - diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index ae971cfe4e1ec..46988a2dd5107 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,3 +1,5 @@ +import org.elasticsearch.gradle.test.AntFixture + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -64,9 +66,28 @@ test { exclude '**/*CredentialsTests.class' } +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +/** A task to start the AmazonS3Fixture which emulates a S3 service **/ +task s3Fixture(type: AntFixture) { + dependsOn compileTestJava + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, 'bucket_test' +} + integTestCluster { - keystoreSetting 's3.client.default.access_key', 'myaccesskey' - keystoreSetting 's3.client.default.secret_key', 'mysecretkey' + dependsOn s3Fixture + + keystoreSetting 's3.client.integration_test.access_key', "s3_integration_test_access_key" + keystoreSetting 's3.client.integration_test.secret_key', "s3_integration_test_secret_key" + + /* Use a closure on the string to delay evaluation until tests are executed */ + setting 's3.client.integration_test.endpoint', "http://${ -> s3Fixture.addressAndPort }" } thirdPartyAudit.excludes = [ diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java new file mode 100644 index 0000000000000..c8321e83d1390 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3Fixture.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.repositories.s3.AmazonS3TestServer.Response; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; + +/** + * {@link AmazonS3Fixture} is a fixture that emulates a S3 service. + *

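+ * Like the Google Cloud Storage fixture, it writes a {@code pid} file and a
+ * {@code ports} file into its working directory so that the build can manage
+ * the process and discover the HTTP endpoint to configure on the client.
+ *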
+ * It starts an asynchronous socket server that binds to a random local port. The server parses + * HTTP requests and uses a {@link AmazonS3TestServer} to handle them before returning + * them to the client as HTTP responses. + */ +public class AmazonS3Fixture { + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("AmazonS3Fixture "); + } + + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); + + try { + final Path workingDirectory = workingDir(args[0]); + /// Writes the PID of the current Java process in a `pid` file located in the working directory + writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); + + final String addressAndPort = addressToString(httpServer.getAddress()); + // Writes the address and port of the http server in a `ports` file located in the working directory + writeFile(workingDirectory, "ports", addressAndPort); + + // Emulates S3 + final String storageUrl = "http://" + addressAndPort; + final AmazonS3TestServer storageTestServer = new AmazonS3TestServer(storageUrl); + storageTestServer.createBucket(args[1]); + + httpServer.createContext("/", new ResponseHandler(storageTestServer)); + httpServer.start(); + + // Wait to be killed + Thread.sleep(Long.MAX_VALUE); + + } finally { + httpServer.stop(0); + } + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path workingDir(final String dir) { + return Paths.get(dir); + } + + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { + final Path tempPidFile = Files.createTempFile(dir, null, null); + Files.write(tempPidFile, singleton(content)); + Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE); + } + + private static String addressToString(final SocketAddress address) { + final InetSocketAddress inetSocketAddress = (InetSocketAddress) address; + if (inetSocketAddress.getAddress() instanceof Inet6Address) { + return "[" + inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort(); + } else { + return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort(); + } + } + + static class ResponseHandler implements HttpHandler { + + private final AmazonS3TestServer storageServer; + + private ResponseHandler(final AmazonS3TestServer storageServer) { + this.storageServer = storageServer; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + String method = exchange.getRequestMethod(); + String path = storageServer.getEndpoint() + exchange.getRequestURI().getRawPath(); + String query = exchange.getRequestURI().getRawQuery(); + Map> headers = exchange.getRequestHeaders(); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + Streams.copy(exchange.getRequestBody(), out); + + final Response storageResponse = storageServer.handle(method, path, query, headers, out.toByteArray()); + + Map> responseHeaders = exchange.getResponseHeaders(); + responseHeaders.put("Content-Type", singletonList(storageResponse.contentType)); + storageResponse.headers.forEach((k, v) -> responseHeaders.put(k, singletonList(v))); + exchange.sendResponseHeaders(storageResponse.status.getStatus(), storageResponse.body.length); + if (storageResponse.body.length > 0) { + 
exchange.getResponseBody().write(storageResponse.body); + } + exchange.close(); + } + } +} diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java new file mode 100644 index 0000000000000..a3ea287b7f829 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AmazonS3TestServer.java @@ -0,0 +1,542 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.util.DateUtils; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.path.PathTrie; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.RestUtils; + +import java.io.BufferedInputStream; +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; + +/** + * {@link AmazonS3TestServer} emulates a S3 service through a {@link #handle(String, String, String, Map, byte[])} + * method that provides appropriate responses for specific requests like the real S3 platform would do. + * It is largely based on official documentation available at https://docs.aws.amazon.com/AmazonS3/latest/API/. 
+ */ +public class AmazonS3TestServer { + + private static byte[] EMPTY_BYTE = new byte[0]; + /** List of the buckets stored on this test server **/ + private final Map buckets = ConcurrentCollections.newConcurrentMap(); + + /** Request handlers for the requests made by the S3 client **/ + private final PathTrie handlers; + + /** Server endpoint **/ + private final String endpoint; + + /** Increments for the requests ids **/ + private final AtomicLong requests = new AtomicLong(0); + + /** + * Creates a {@link AmazonS3TestServer} with a custom endpoint + */ + AmazonS3TestServer(final String endpoint) { + this.endpoint = Objects.requireNonNull(endpoint, "endpoint must not be null"); + this.handlers = defaultHandlers(endpoint, buckets); + } + + /** Creates a bucket in the test server **/ + void createBucket(final String bucketName) { + buckets.put(bucketName, new Bucket(bucketName)); + } + + public String getEndpoint() { + return endpoint; + } + + /** + * Returns a response for the given request + * + * @param method the HTTP method of the request + * @param path the path of the URL of the request + * @param query the queryString of the URL of request + * @param headers the HTTP headers of the request + * @param body the HTTP request body + * @return a {@link Response} + * @throws IOException if something goes wrong + */ + public Response handle(final String method, + final String path, + final String query, + final Map> headers, + byte[] body) throws IOException { + + final long requestId = requests.incrementAndGet(); + + final Map params = new HashMap<>(); + if (query != null) { + RestUtils.decodeQueryString(query, 0, params); + } + + final List authorizations = headers.get("Authorization"); + if (authorizations == null + || (authorizations.isEmpty() == false & authorizations.get(0).contains("s3_integration_test_access_key") == false)) { + return newError(requestId, RestStatus.FORBIDDEN, "AccessDenied", "Access Denied", ""); + } + + final RequestHandler handler = handlers.retrieve(method + " " + path, params); + if (handler != null) { + return handler.execute(params, headers, body, requestId); + } else { + return newInternalError(requestId, "No handler defined for request [method: " + method + ", path: " + path + "]"); + } + } + + @FunctionalInterface + interface RequestHandler { + + /** + * Simulates the execution of a S3 request and returns a corresponding response. 
+ * + * @param params the request's query string parameters + * @param headers the request's headers + * @param body the request body provided as a byte array + * @param requestId a unique id for the incoming request + * @return the corresponding response + * + * @throws IOException if something goes wrong + */ + Response execute(Map params, Map> headers, byte[] body, long requestId) throws IOException; + } + + /** Builds the default request handlers **/ + private static PathTrie defaultHandlers(final String endpoint, final Map buckets) { + final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); + + // HEAD Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html + objectsPaths("HEAD " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + for (Map.Entry object : bucket.objects.entrySet()) { + if (object.getKey().equals(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // PUT Object & PUT Object Copy + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html + objectsPaths("PUT " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String destBucketName = params.get("bucket"); + + final Bucket destBucket = buckets.get(destBucketName); + if (destBucket == null) { + return newBucketNotFoundError(id, destBucketName); + } + + final String destObjectName = objectName(params); + + // Request is a copy request + List headerCopySource = headers.getOrDefault("x-amz-copy-source", emptyList()); + if (headerCopySource.isEmpty() == false) { + String srcObjectName = headerCopySource.get(0); + + Bucket srcBucket = null; + for (Bucket bucket : buckets.values()) { + String prefix = "/" + bucket.name + "/"; + if (srcObjectName.startsWith(prefix)) { + srcObjectName = srcObjectName.replaceFirst(prefix, ""); + srcBucket = bucket; + break; + } + } + + if (srcBucket == null || srcBucket.objects.containsKey(srcObjectName) == false) { + return newObjectNotFoundError(id, srcObjectName); + } + + byte[] bytes = srcBucket.objects.get(srcObjectName); + if (bytes != null) { + destBucket.objects.put(destObjectName, bytes); + return newCopyResultResponse(id); + } else { + return newObjectNotFoundError(id, srcObjectName); + } + } else { + // This is a chunked upload request. We should have the header "Content-Encoding : aws-chunked,gzip" + // to detect it but it seems that the AWS SDK does not follow the S3 guidelines here. + // + // See https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html + // + List headerDecodedContentLength = headers.getOrDefault("X-amz-decoded-content-length", emptyList()); + if (headerDecodedContentLength.size() == 1) { + int contentLength = Integer.valueOf(headerDecodedContentLength.get(0)); + + // Chunked requests have a payload like this: + // + // 105;chunk-signature=01d0de6be013115a7f4794db8c4b9414e6ec71262cc33ae562a71f2eaed1efe8 + // ... bytes of data .... 
+ // 0;chunk-signature=f890420b1974c5469aaf2112e9e6f2e0334929fd45909e03c0eff7a84124f6a4 + // + try (BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(body))) { + int b; + // Moves to the end of the first signature line + while ((b = inputStream.read()) != -1) { + if (b == '\n') { + break; + } + } + + final byte[] bytes = new byte[contentLength]; + inputStream.read(bytes, 0, contentLength); + + destBucket.objects.put(destObjectName, bytes); + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + } + } + return newInternalError(id, "Something is wrong with this PUT request"); + }) + ); + + // DELETE Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html + objectsPaths("DELETE " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.remove(objectName) != null) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // GET Object + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html + objectsPaths("GET " + endpoint + "/{bucket}").forEach(path -> + handlers.insert(path, (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + final String objectName = objectName(params); + if (bucket.objects.containsKey(objectName)) { + return new Response(RestStatus.OK, emptyMap(), "application/octet-stream", bucket.objects.get(objectName)); + + } + return newObjectNotFoundError(id, objectName); + }) + ); + + // HEAD Bucket + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketHEAD.html + handlers.insert("HEAD " + endpoint + "/{bucket}", (params, headers, body, id) -> { + String bucket = params.get("bucket"); + if (Strings.hasText(bucket) && buckets.containsKey(bucket)) { + return new Response(RestStatus.OK, emptyMap(), "text/plain", EMPTY_BYTE); + } else { + return newBucketNotFoundError(id, bucket); + } + }); + + // GET Bucket (List Objects) Version 1 + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html + handlers.insert("GET " + endpoint + "/{bucket}/", (params, headers, body, id) -> { + final String bucketName = params.get("bucket"); + + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newBucketNotFoundError(id, bucketName); + } + + String prefix = params.get("prefix"); + if (prefix == null) { + List prefixes = headers.get("Prefix"); + if (prefixes != null && prefixes.size() == 1) { + prefix = prefixes.get(0); + } + } + return newListBucketResultResponse(id, bucket, prefix); + }); + + // Delete Multiple Objects + // + // https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html + handlers.insert("POST " + endpoint + "/", (params, headers, body, id) -> { + final List deletes = new ArrayList<>(); + final List errors = new ArrayList<>(); + + if (params.containsKey("delete")) { + // The request body is something like: + // ...... 
+ String request = Streams.copyToString(new InputStreamReader(new ByteArrayInputStream(body), StandardCharsets.UTF_8)); + if (request.startsWith("")) { + final String startMarker = ""; + final String endMarker = ""; + + int offset = 0; + while (offset != -1) { + offset = request.indexOf(startMarker, offset); + if (offset > 0) { + int closingOffset = request.indexOf(endMarker, offset); + if (closingOffset != -1) { + offset = offset + startMarker.length(); + final String objectName = request.substring(offset, closingOffset); + + boolean found = false; + for (Bucket bucket : buckets.values()) { + if (bucket.objects.remove(objectName) != null) { + found = true; + } + } + + if (found) { + deletes.add(objectName); + } else { + errors.add(objectName); + } + } + } + } + return newDeleteResultResponse(id, deletes, errors); + } + } + return newInternalError(id, "Something is wrong with this POST multiple deletes request"); + }); + + return handlers; + } + + /** + * Represents a S3 bucket. + */ + static class Bucket { + + /** Bucket name **/ + final String name; + + /** Blobs contained in the bucket **/ + final Map objects; + + Bucket(final String name) { + this.name = Objects.requireNonNull(name); + this.objects = ConcurrentCollections.newConcurrentMap(); + } + } + + /** + * Represents a HTTP Response. + */ + static class Response { + + final RestStatus status; + final Map headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } + + /** + * Decline a path like "http://host:port/{bucket}" into 10 derived paths like: + * - http://host:port/{bucket}/{path0} + * - http://host:port/{bucket}/{path0}/{path1} + * - http://host:port/{bucket}/{path0}/{path1}/{path2} + * - etc + */ + private static List objectsPaths(final String path) { + final List paths = new ArrayList<>(); + String p = path; + for (int i = 0; i < 10; i++) { + p = p + "/{path" + i + "}"; + paths.add(p); + } + return paths; + } + + /** + * Retrieves the object name from all derives paths named {pathX} where 0 <= X < 10. 
+ * + * This is the counterpart of {@link #objectsPaths(String)} + */ + private static String objectName(final Map params) { + final StringBuilder name = new StringBuilder(); + for (int i = 0; i < 10; i++) { + String value = params.getOrDefault("path" + i, null); + if (value != null) { + if (name.length() > 0) { + name.append('/'); + } + name.append(value); + } + } + return name.toString(); + } + + /** + * S3 ListBucketResult Response + */ + private static Response newListBucketResultResponse(final long requestId, final Bucket bucket, final String prefix) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append(""); + if (prefix != null) { + response.append(prefix); + } + response.append(""); + response.append(""); + response.append("1000"); + response.append("false"); + + int count = 0; + for (Map.Entry object : bucket.objects.entrySet()) { + String objectName = object.getKey(); + if (prefix == null || objectName.startsWith(prefix)) { + response.append(""); + response.append("").append(objectName).append(""); + response.append("").append(DateUtils.formatISO8601Date(new Date())).append(""); + response.append(""").append(count++).append("""); + response.append("").append(object.getValue().length).append(""); + response.append(""); + } + } + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 Copy Result Response + */ + private static Response newCopyResultResponse(final long requestId) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append("").append(DateUtils.formatISO8601Date(new Date())).append(""); + response.append("").append(requestId).append(""); + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + /** + * S3 DeleteResult Response + */ + private static Response newDeleteResultResponse(final long requestId, + final List deletedObjects, + final List ignoredObjects) { + final String id = Long.toString(requestId); + + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + for (String deletedObject : deletedObjects) { + response.append(""); + response.append("").append(deletedObject).append(""); + response.append(""); + } + for (String ignoredObject : ignoredObjects) { + response.append(""); + response.append("").append(ignoredObject).append(""); + response.append("NoSuchKey"); + response.append(""); + } + response.append(""); + return new Response(RestStatus.OK, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } + + private static Response newBucketNotFoundError(final long requestId, final String bucket) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchBucket", "The specified bucket does not exist", bucket); + } + + private static Response newObjectNotFoundError(final long requestId, final String object) { + return newError(requestId, RestStatus.NOT_FOUND, "NoSuchKey", "The specified key does not exist", object); + } + + private static Response newInternalError(final long requestId, final String resource) { + return newError(requestId, RestStatus.INTERNAL_SERVER_ERROR, "InternalError", "We encountered an internal error", resource); + } + + /** 
+ * S3 Error + * + * https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html + */ + private static Response newError(final long requestId, + final RestStatus status, + final String code, + final String message, + final String resource) { + final String id = Long.toString(requestId); + final StringBuilder response = new StringBuilder(); + response.append(""); + response.append(""); + response.append("").append(code).append(""); + response.append("").append(message).append(""); + response.append("").append(resource).append(""); + response.append("").append(id).append(""); + response.append(""); + return new Response(status, singletonMap("x-amz-request-id", id), "application/xml", response.toString().getBytes(UTF_8)); + } +} diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml index 5fcc81209e219..11f4610f6f7b2 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/10_basic.yml @@ -1,6 +1,6 @@ -# Integration tests for Repository S3 component +# Integration tests for repository-s3 # -"Repository S3 loaded": +"Plugin repository-s3 is loaded": - do: cluster.state: {} @@ -11,3 +11,183 @@ nodes.info: {} - match: { nodes.$master.plugins.0.name: repository-s3 } +--- +"Snapshot/Restore with repository-s3": + + # Register repository + - do: + snapshot.create_repository: + repository: repository + body: + type: s3 + settings: + bucket: "bucket_test" + client: "integration_test" + canned_acl: "public-read" + storage_class: "standard" + + - match: { acknowledged: true } + + # Get repository + - do: + snapshot.get_repository: + repository: repository + + - match: {repository.settings.bucket : "bucket_test"} + - match: {repository.settings.client : "integration_test"} + - match: {repository.settings.canned_acl : "public-read"} + - match: {repository.settings.storage_class : "standard"} + - is_false: repository.settings.access_key + - is_false: repository.settings.secret_key + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: 
snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository + snapshot: snapshot-one + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml deleted file mode 100644 index 74cab3edcb705..0000000000000 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yml +++ /dev/null @@ -1,24 +0,0 @@ -# Integration tests for Repository S3 component -# -"S3 repository can be registered": - - do: - snapshot.create_repository: - repository: test_repo_s3_1 - verify: false - body: - type: s3 - settings: - bucket: "my_bucket_name" - canned_acl: "public-read" - storage_class: "standard" - - # Get repository - - do: - snapshot.get_repository: - repository: test_repo_s3_1 - - - is_true: test_repo_s3_1 - - is_true: test_repo_s3_1.settings.bucket - - is_false: test_repo_s3_1.settings.access_key - - is_false: test_repo_s3_1.settings.secret_key - - match: {test_repo_s3_1.settings.canned_acl : "public-read"} From 2dc546ccec5ca3adce9790b785ca50bfd7dc4b12 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 3 Apr 2018 11:51:06 +0200 Subject: [PATCH 41/68] Don't break allocation if resize source index is missing (#29311) DiskThresholdDecider currently assumes that the source index of a resize operation (e.g. shrink) is available, and throws an IndexNotFoundException otherwise, thereby breaking any kind of shard allocation. This can be quite harmful if the source index is deleted during a shrink, or if the source index is unavailable during state recovery. While this behavior has been partly fixed in 6.1 and above (due to #26931), it relies on the order in which AllocationDeciders are executed (i.e. that ResizeAllocationDecider returns NO, ensuring that DiskThresholdDecider does not run, something that for example does not hold for the allocation explain API). This change adds a more complete fix, and also solves the situation for 5.6. 
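The essence of the change is to switch from MetaData#getIndexSafe (which throws an IndexNotFoundException) to MetaData#index (which returns null) and to fall back to the caller-provided default size when the resize source index is gone. A minimal stand-alone sketch of that pattern (simplified types, not the decider code itself):

    import java.util.Collections;
    import java.util.Map;

    public class ResizeSourceSizeSketch {
        // Mirrors the fix: a null-tolerant lookup plus a default fallback,
        // instead of a lookup that throws when the source index was deleted.
        static long expectedShardSize(Map<String, Long> shardSizes, String sourceIndex, long defaultValue) {
            Long size = shardSizes.get(sourceIndex); // null if the source index is missing
            if (size == null || size == 0L) {
                return defaultValue;
            }
            return size;
        }

        public static void main(String[] args) {
            Map<String, Long> sizes = Collections.singletonMap("source-index", 1000L);
            System.out.println(expectedShardSize(sizes, "source-index", 42L));  // 1000
            System.out.println(expectedShardSize(sizes, "deleted-index", 42L)); // 42, no exception
        }
    }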
--- .../allocation/decider/DiskThresholdDecider.java | 13 ++++++++----- .../decider/DiskThresholdDeciderUnitTests.java | 14 ++++++++++++++ 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 2a323af5f8435..ad30dc49a5524 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -409,11 +409,14 @@ public static long getExpectedShardSize(ShardRouting shard, RoutingAllocation al // the worst case long targetShardSize = 0; final Index mergeSourceIndex = metaData.getResizeSourceIndex(); - final IndexMetaData sourceIndexMeta = allocation.metaData().getIndexSafe(mergeSourceIndex); - final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards()); - for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { - if (shardIds.contains(shardRoutingTable.shardId())) { - targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + final IndexMetaData sourceIndexMeta = allocation.metaData().index(mergeSourceIndex); + if (sourceIndexMeta != null) { + final Set shardIds = IndexMetaData.selectRecoverFromShards(shard.id(), + sourceIndexMeta, metaData.getNumberOfShards()); + for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { + if (shardIds.contains(shardRoutingTable.shardId())) { + targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); + } } } return targetShardSize == 0 ? 
defaultValue : targetShardSize; diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 3676ca8bd6e85..10fc358e4d4ea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -342,6 +342,20 @@ public void testSizeShrinkIndex() { target2 = ShardRouting.newUnassigned(new ShardId(new Index("target2", "9101112"), 1), true, LocalShardsRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); assertEquals(1000L, DiskThresholdDecider.getExpectedShardSize(target2, allocation, 0)); + + // check that the DiskThresholdDecider still works even if the source index has been deleted + ClusterState clusterStateWithMissingSourceIndex = ClusterState.builder(clusterState) + .metaData(MetaData.builder(metaData).remove("test")) + .routingTable(RoutingTable.builder(clusterState.routingTable()).remove("test").build()) + .build(); + + allocationService.reroute(clusterState, "foo"); + + RoutingAllocation allocationWithMissingSourceIndex = new RoutingAllocation(null, + clusterStateWithMissingSourceIndex.getRoutingNodes(), clusterStateWithMissingSourceIndex, info, 0); + + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target, allocationWithMissingSourceIndex, 42L)); + assertEquals(42L, DiskThresholdDecider.getExpectedShardSize(target2, allocationWithMissingSourceIndex, 42L)); } } From d4538df89320c09469d9d76f09dbfe3af79b36c2 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 3 Apr 2018 11:57:58 +0200 Subject: [PATCH 42/68] Improve exception handling on TransportMasterNodeAction (#29314) We have seen exceptions bubble up to the uncaught exception handler. Checking the cluster blocks can, for example, lead to an IndexNotFoundException when the indices are resolved. In order to make TransportMasterNodeAction more resilient against such expected exceptions, this change wraps the execution of doStart() in a try/catch block and informs the listener of any failure.
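The pattern being applied is simple: any exception thrown synchronously while kicking off the asynchronous flow must be routed to the response listener rather than escaping to the uncaught exception handler. A stripped-down sketch (assumed listener interface, not the transport code):

    public class ListenerSafeStart {
        interface ActionListener<T> {
            void onResponse(T response);
            void onFailure(Exception e);
        }

        static <T> void start(Runnable doStart, ActionListener<T> listener) {
            try {
                doStart.run(); // may throw, e.g. while resolving indices during a block check
            } catch (Exception e) {
                listener.onFailure(e); // surface the failure instead of letting it bubble up
            }
        }

        public static void main(String[] args) {
            start(() -> { throw new RuntimeException("simulated block-check failure"); },
                new ActionListener<Void>() {
                    @Override
                    public void onResponse(Void response) {}

                    @Override
                    public void onFailure(Exception e) {
                        System.out.println("listener notified: " + e.getMessage());
                    }
                });
        }
    }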
--- .../master/TransportMasterNodeAction.java | 120 ++++++++++-------- .../TransportMasterNodeActionTests.java | 33 +++++ 2 files changed, 98 insertions(+), 55 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index f4a26e723dc0a..42d7da118460e 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -145,69 +145,79 @@ public void start() { } protected void doStart(ClusterState clusterState) { - final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); - final DiscoveryNodes nodes = clusterState.nodes(); - if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { - // check for block, if blocked, retry, else, execute locally - final ClusterBlockException blockException = checkBlock(request, clusterState); - if (blockException != null) { - if (!blockException.retryable()) { - listener.onFailure(blockException); - } else { - logger.trace("can't execute due to a cluster block, retrying", blockException); - retry(blockException, newState -> { - ClusterBlockException newException = checkBlock(request, newState); - return (newException == null || !newException.retryable()); - }); - } - } else { - ActionListener delegate = new ActionListener() { - @Override - public void onResponse(Response response) { - listener.onResponse(response); + try { + final Predicate masterChangePredicate = MasterNodeChangePredicate.build(clusterState); + final DiscoveryNodes nodes = clusterState.nodes(); + if (nodes.isLocalNodeElectedMaster() || localExecute(request)) { + // check for block, if blocked, retry, else, execute locally + final ClusterBlockException blockException = checkBlock(request, clusterState); + if (blockException != null) { + if (!blockException.retryable()) { + listener.onFailure(blockException); + } else { + logger.trace("can't execute due to a cluster block, retrying", blockException); + retry(blockException, newState -> { + try { + ClusterBlockException newException = checkBlock(request, newState); + return (newException == null || !newException.retryable()); + } catch (Exception e) { + // accept state as block will be rechecked by doStart() and listener.onFailure() then called + logger.trace("exception occurred during cluster block checking, accepting state", e); + return true; + } + }); } + } else { + ActionListener delegate = new ActionListener() { + @Override + public void onResponse(Response response) { + listener.onResponse(response); + } - @Override - public void onFailure(Exception t) { - if (t instanceof Discovery.FailedToCommitClusterStateException + @Override + public void onFailure(Exception t) { + if (t instanceof Discovery.FailedToCommitClusterStateException || (t instanceof NotMasterException)) { - logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); - retry(t, masterChangePredicate); - } else { - listener.onFailure(t); + logger.debug(() -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t); + retry(t, masterChangePredicate); + } else { + listener.onFailure(t); + } } - } - }; - threadPool.executor(executor).execute(new 
ActionRunnable(delegate) { - @Override - protected void doRun() throws Exception { - masterOperation(task, request, clusterState, delegate); - } - }); - } - } else { - if (nodes.getMasterNode() == null) { - logger.debug("no known master node, scheduling a retry"); - retry(null, masterChangePredicate); + }; + threadPool.executor(executor).execute(new ActionRunnable(delegate) { + @Override + protected void doRun() throws Exception { + masterOperation(task, request, clusterState, delegate); + } + }); + } } else { - DiscoveryNode masterNode = nodes.getMasterNode(); - final String actionName = getMasterActionName(masterNode); - transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, - TransportMasterNodeAction.this::newResponse) { - @Override - public void handleException(final TransportException exp) { - Throwable cause = exp.unwrapCause(); - if (cause instanceof ConnectTransportException) { - // we want to retry here a bit to see if a new master is elected - logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]", + if (nodes.getMasterNode() == null) { + logger.debug("no known master node, scheduling a retry"); + retry(null, masterChangePredicate); + } else { + DiscoveryNode masterNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(masterNode); + transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, + TransportMasterNodeAction.this::newResponse) { + @Override + public void handleException(final TransportException exp) { + Throwable cause = exp.unwrapCause(); + if (cause instanceof ConnectTransportException) { + // we want to retry here a bit to see if a new master is elected + logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. 
Error: [{}]", actionName, nodes.getMasterNode(), exp.getDetailedMessage()); - retry(cause, masterChangePredicate); - } else { - listener.onFailure(exp); + retry(cause, masterChangePredicate); + } else { + listener.onFailure(exp); + } } - } - }); + }); + } } + } catch (Exception e) { + listener.onFailure(e); } } diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index de65d2a3f9240..f2b18a8c8f561 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -242,6 +242,39 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } } + public void testCheckBlockThrowsException() throws InterruptedException { + boolean throwExceptionOnRetry = randomBoolean(); + Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(60)); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlock block = new ClusterBlock(1, "", true, true, + false, randomFrom(RestStatus.values()), ClusterBlockLevel.ALL); + ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.builder().addGlobalBlock(block)).build(); + setState(clusterService, stateWithBlock); + + new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) { + @Override + protected ClusterBlockException checkBlock(Request request, ClusterState state) { + Set blocks = state.blocks().global(); + if (throwExceptionOnRetry == false || blocks.isEmpty()) { + throw new RuntimeException("checkBlock has thrown exception"); + } + return new ClusterBlockException(blocks); + + } + }.execute(request, listener); + + if (throwExceptionOnRetry == false) { + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } else { + assertFalse(listener.isDone()); + setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes)) + .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build()); + assertListenerThrows("checkBlock has thrown exception", listener, RuntimeException.class); + } + } + public void testForceLocalOperation() throws ExecutionException, InterruptedException { Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); From 080cefec73da631719687df2644f71041fe32d07 Mon Sep 17 00:00:00 2001 From: rzmf Date: Tue, 3 Apr 2018 12:33:44 +0200 Subject: [PATCH 43/68] Fix missing comma in ingest-node.asciidoc (#29343) --- docs/reference/ingest/ingest-node.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index b31fc1ef5ea1d..8a7c33086abe8 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -1341,7 +1341,7 @@ Here is an example of a pipeline specifying custom pattern definitions: { "grok": { "field": "message", - "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"] + "patterns": ["my %{FAVORITE_DOG:dog} is colored %{RGB:color}"], "pattern_definitions" : { "FAVORITE_DOG" : "beagle", "RGB" : "RED|GREEN|BLUE" From 3be98edc69cf7d713279028088f51009b4210dc0 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 3 Apr 2018 
13:53:15 +0200 Subject: [PATCH 44/68] [DOCS] Refer back to index API for full-document updates in _update API section (#28677) This clarifies how full-document updates are performed in ES. --- docs/reference/docs/update.asciidoc | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/update.asciidoc b/docs/reference/docs/update.asciidoc index 18aee6094f80a..7ba7e2da63369 100644 --- a/docs/reference/docs/update.asciidoc +++ b/docs/reference/docs/update.asciidoc @@ -118,8 +118,11 @@ POST test/_doc/1/_update The update API also support passing a partial document, which will be merged into the existing document (simple recursive merge, -inner merging of objects, replacing core "keys/values" and arrays). For -example: +inner merging of objects, replacing core "keys/values" and arrays). +To fully replace the existing document, the <> should +be used instead. +The following partial update adds a new field to the +existing document: [source,js] -------------------------------------------------- From befa66ae35c5883536888f99271ebb05ce76fb69 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Apr 2018 14:20:43 +0200 Subject: [PATCH 45/68] Elasticsearch 6.3.0 is now on Lucene 7.3. --- server/src/main/java/org/elasticsearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 2652afce9b4a1..be56f01fa2dc2 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -164,7 +164,7 @@ public class Version implements Comparable, ToXContentFragment { public static final int V_6_2_4_ID = 6020499; public static final Version V_6_2_4 = new Version(V_6_2_4_ID, LUCENE_7_2_1); public static final int V_6_3_0_ID = 6030099; - public static final Version V_6_3_0 = new Version(V_6_3_0_ID, LUCENE_7_2_1); + public static final Version V_6_3_0 = new Version(V_6_3_0_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); public static final int V_7_0_0_alpha1_ID = 7000001; public static final Version V_7_0_0_alpha1 = new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_3_0); From 5adfe2d8e345ed9bd296484b4e0772216a73fd59 Mon Sep 17 00:00:00 2001 From: Ragnar Nevries Date: Tue, 3 Apr 2018 14:57:42 +0200 Subject: [PATCH 46/68] [Docs] Update getting-started.asciidoc (#29294) Description after first use of search API with body does not refer to verb POST. --- docs/reference/getting-started.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index af7fc8fa6d69b..937917823f5a6 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -777,7 +777,7 @@ GET /bank/_search // CONSOLE // TEST[continued] -The difference here is that instead of passing `q=*` in the URI, we POST a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. +The difference here is that instead of passing `q=*` in the URI, we provide a JSON-style query request body to the `_search` API. We'll discuss this JSON query in the next section. 
//// Hidden response just so we can assert that it is indeed the same but don't have From db8ed36436c67c6915a06d1e881e2a566b43f8f5 Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Tue, 3 Apr 2018 07:57:21 -0600 Subject: [PATCH 47/68] Move Nullable into core (#29341) This moves the `Nullable` annotation into the elasticsearch-core project, so it may be used without relying entirely on the server jar. This will allow us to decouple more pieces to make them smaller. In addition, there were two different `Nullable` annotations, these have all been moved to the ES version rather than the inject version. --- .../org/elasticsearch/common/Nullable.java | 0 .../snapshots/status/SnapshotStatus.java | 2 +- .../action/search/ScrollIdForNode.java | 2 +- .../common/inject/InjectorImpl.java | 2 +- .../common/inject/internal/Join.java | 1 + .../common/inject/internal/Nullable.java | 38 ------------------- .../CompositeValuesSourceConfig.java | 2 +- .../bucket/composite/SortedDocsProducer.java | 2 +- .../histogram/DateHistogramAggregator.java | 2 +- .../bucket/histogram/HistogramAggregator.java | 2 +- .../transport/ConnectionProfile.java | 4 +- 11 files changed, 10 insertions(+), 47 deletions(-) rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/Nullable.java (100%) delete mode 100644 server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java diff --git a/server/src/main/java/org/elasticsearch/common/Nullable.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java similarity index 100% rename from server/src/main/java/org/elasticsearch/common/Nullable.java rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/Nullable.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 7b41d96c0e3ba..f7545ea0236a7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -22,7 +22,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; diff --git a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java index 59e1a3310672b..fc6585054ddf9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java +++ b/server/src/main/java/org/elasticsearch/action/search/ScrollIdForNode.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; class ScrollIdForNode { private final String node; diff --git a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 817d52ea7435c..3721c0cc8b178 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/server/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -17,6 +17,7 @@ 
package org.elasticsearch.common.inject; import org.elasticsearch.common.Classes; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.internal.Annotations; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; @@ -27,7 +28,6 @@ import org.elasticsearch.common.inject.internal.LinkedBindingImpl; import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl; import org.elasticsearch.common.inject.internal.MatcherAndConverter; -import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.inject.internal.Scoping; import org.elasticsearch.common.inject.internal.SourceProvider; import org.elasticsearch.common.inject.internal.ToStringBuilder; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java index c876ea4cb9da7..e44bed9d88acb 100644 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java +++ b/server/src/main/java/org/elasticsearch/common/inject/internal/Join.java @@ -16,6 +16,7 @@ package org.elasticsearch.common.inject.internal; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.util.CollectionUtils; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java b/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java deleted file mode 100644 index 764e93473dd35..0000000000000 --- a/server/src/main/java/org/elasticsearch/common/inject/internal/Nullable.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (C) 2007 Google Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.elasticsearch.common.inject.internal; - -import java.lang.annotation.Documented; -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * The presence of this annotation on a method parameter indicates that - * {@code null} is an acceptable value for that parameter. It should not be - * used for parameters of primitive types. - *

- * This annotation may be used with the Google Web Toolkit (GWT). - * - * @author Kevin Bourrillion - */ -@Documented -@Retention(RetentionPolicy.RUNTIME) -@Target({ElementType.PARAMETER, ElementType.FIELD}) -public @interface Nullable { -} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java index ca4b38dc1f4d5..8756eed6feb78 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceConfig.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java index d9d927ff66061..ef2b37d9c081b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SortedDocsProducer.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.Bits; import org.apache.lucene.util.DocIdSetBuilder; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.search.aggregations.LeafBucketCollector; import java.io.IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8d879b88b3dca..c32cedb4427e8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java index 4938daad65bfc..a0e4871a7df42 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.CollectionUtil; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lease.Releasables; 
import org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java index a36c9f6f77b9b..e14f684bf72ef 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectionProfile.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.transport; -import org.elasticsearch.common.inject.internal.Nullable; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.TimeValue; import java.util.ArrayList; @@ -41,7 +41,7 @@ public final class ConnectionProfile { */ public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType, @Nullable TimeValue connectTimeout, - @Nullable TimeValue handshakeTimeout) { + @Nullable TimeValue handshakeTimeout) { Builder builder = new Builder(); builder.addConnections(1, channelType); final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); From dc1c16964aced0927dd92b082a160d4574d00370 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 3 Apr 2018 16:16:21 +0200 Subject: [PATCH 48/68] [Docs] Correct experimental note formatting --- docs/reference/search/rank-eval.asciidoc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/reference/search/rank-eval.asciidoc b/docs/reference/search/rank-eval.asciidoc index fa75374110ef6..e2998086c8917 100644 --- a/docs/reference/search/rank-eval.asciidoc +++ b/docs/reference/search/rank-eval.asciidoc @@ -1,9 +1,7 @@ [[search-rank-eval]] == Ranking Evaluation API -experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, -as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort -approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] +experimental[The ranking evaluation API is experimental and may be changed or removed completely in a future release, as well as change in non-backwards compatible ways on minor versions updates. Elastic will take a best effort approach to fix any issues, but experimental features are not subject to the support SLA of official GA features.] The ranking evaluation API allows to evaluate the quality of ranked search results over a set of typical search queries. Given this set of queries and a From 8cdd950056b722e7086f48fb84cf6c29ccf31bd3 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Apr 2018 16:44:26 +0200 Subject: [PATCH 49/68] Fix some query extraction bugs. (#29283) While playing with the percolator I found two bugs: - Sometimes we set a min_should_match that is greater than the number of extractions. While this doesn't cause direct trouble, it does when the query is nested into a boolean query and the boolean query tries to compute the min_should_match for the entire query based on its own min_should_match and those of the sub queries. So I changed the code to throw an exception when min_should_match is greater than the number of extractions. - Boolean queries claim matches are verified when in fact they shouldn't. This is due to the fact that boolean queries assume that they are verified if all sub clauses are verified but things are more complex than that, eg. 
conjunctions that are nested in a disjunction or disjunctions that are nested in a conjunction can generally not be verified without running the query. --- .../percolator/QueryAnalyzer.java | 48 ++++-- .../percolator/CandidateQueryTests.java | 9 +- .../percolator/QueryAnalyzerTests.java | 146 ++++++++++++++++-- 3 files changed, 170 insertions(+), 33 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java index 24b210c29d584..8f1bb2a9310d3 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/QueryAnalyzer.java @@ -143,7 +143,7 @@ static Result analyze(Query query, Version indexVersion) { } private static BiFunction matchNoDocsQuery() { - return (query, version) -> new Result(true, Collections.emptySet(), 1); + return (query, version) -> new Result(true, Collections.emptySet(), 0); } private static BiFunction matchAllDocsQuery() { @@ -179,28 +179,28 @@ private static BiFunction termInSetQuery() { for (BytesRef term = iterator.next(); term != null; term = iterator.next()) { terms.add(new QueryExtraction(new Term(iterator.field(), term))); } - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction synonymQuery() { return (query, version) -> { Set terms = ((SynonymQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } private static BiFunction commonTermsQuery() { return (query, version) -> { Set terms = ((CommonTermsQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } private static BiFunction blendedTermQuery() { return (query, version) -> { Set terms = ((BlendedTermQuery) query).getTerms().stream().map(QueryExtraction::new).collect(toSet()); - return new Result(true, terms, 1); + return new Result(true, terms, Math.min(1, terms.size())); }; } @@ -208,7 +208,7 @@ private static BiFunction phraseQuery() { return (query, version) -> { Term[] terms = ((PhraseQuery) query).getTerms(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -232,7 +232,7 @@ private static BiFunction multiPhraseQuery() { return (query, version) -> { Term[][] terms = ((MultiPhraseQuery) query).getTermArrays(); if (terms.length == 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } if (version.onOrAfter(Version.V_6_1_0)) { @@ -297,7 +297,7 @@ private static BiFunction spanOrQuery() { for (SpanQuery clause : spanOrQuery.getClauses()) { terms.addAll(analyze(clause, version).extractions); } - return new Result(false, terms, 1); + return new Result(false, terms, Math.min(1, terms.size())); }; } @@ -334,6 +334,9 @@ private static BiFunction booleanQuery() { numOptionalClauses++; } } + if (minimumShouldMatch > numOptionalClauses) { + return new Result(false, Collections.emptySet(), 0); + } if (numRequiredClauses > 0) { if (version.onOrAfter(Version.V_6_1_0)) { UnsupportedQueryException uqe = null; @@ -345,7 +348,12 @@ private static BiFunction booleanQuery() { // since they are 
completely optional. try { - results.add(analyze(clause.getQuery(), version)); + Result subResult = analyze(clause.getQuery(), version); + if (subResult.matchAllDocs == false && subResult.extractions.isEmpty()) { + // doesn't match anything + return subResult; + } + results.add(subResult); } catch (UnsupportedQueryException e) { uqe = e; } @@ -400,7 +408,11 @@ private static BiFunction booleanQuery() { } msm += resultMsm; - verified &= result.verified; + if (result.verified == false + // If some inner extractions are optional, the result can't be verified + || result.minimumShouldMatch < result.extractions.size()) { + verified = false; + } matchAllDocs &= result.matchAllDocs; extractions.addAll(result.extractions); } @@ -492,7 +504,7 @@ private static BiFunction pointRangeQuery() { // Need to check whether upper is not smaller than lower, otherwise NumericUtils.subtract(...) fails IAE // If upper is really smaller than lower then we deal with like MatchNoDocsQuery. (verified and no extractions) if (new BytesRef(lowerPoint).compareTo(new BytesRef(upperPoint)) > 0) { - return new Result(true, Collections.emptySet(), 1); + return new Result(true, Collections.emptySet(), 0); } byte[] interval = new byte[16]; @@ -537,7 +549,15 @@ private static Result handleDisjunction(List disjunctions, int requiredSh for (int i = 0; i < disjunctions.size(); i++) { Query disjunct = disjunctions.get(i); Result subResult = analyze(disjunct, version); - verified &= subResult.verified; + if (subResult.verified == false + // one of the sub queries requires more than one term to match, we can't + // verify it with a single top-level min_should_match + || subResult.minimumShouldMatch > 1 + // One of the inner clauses has multiple extractions, we won't be able to + // verify it with a single top-level min_should_match + || (subResult.extractions.size() > 1 && requiredShouldClauses > 1)) { + verified = false; + } if (subResult.matchAllDocs) { numMatchAllClauses++; } @@ -683,6 +703,10 @@ static class Result { final boolean matchAllDocs; Result(boolean verified, Set extractions, int minimumShouldMatch) { + if (minimumShouldMatch > extractions.size()) { + throw new IllegalArgumentException("minimumShouldMatch can't be greater than the number of extractions: " + + minimumShouldMatch + " > " + extractions.size()); + } this.extractions = extractions; this.verified = verified; this.minimumShouldMatch = minimumShouldMatch; diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java index 59f4e091140ea..27d72b2926749 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/CandidateQueryTests.java @@ -210,12 +210,13 @@ public void testDuel() throws Exception { new BytesRef(randomFrom(stringContent.get(field1))))); queryFunctions.add(() -> new TermInSetQuery(field2, new BytesRef(randomFrom(stringContent.get(field1))), new BytesRef(randomFrom(stringContent.get(field1))))); - int numRandomBoolQueries = randomIntBetween(16, 32); + // many iterations with boolean queries, which are the most complex queries to deal with when nested + int numRandomBoolQueries = 1000; for (int i = 0; i < numRandomBoolQueries; i++) { queryFunctions.add(() -> createRandomBooleanQuery(1, stringFields, stringContent, intFieldType, intValues)); } queryFunctions.add(() -> { - int numClauses = randomIntBetween(1, 16); + int 
numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); List clauses = new ArrayList<>(); for (int i = 0; i < numClauses; i++) { String field = randomFrom(stringFields); @@ -266,7 +267,7 @@ public void testDuel() throws Exception { private BooleanQuery createRandomBooleanQuery(int depth, List fields, Map> content, MappedFieldType intFieldType, List intValues) { BooleanQuery.Builder builder = new BooleanQuery.Builder(); - int numClauses = randomIntBetween(1, 16); + int numClauses = randomIntBetween(1, 1 << randomIntBetween(2, 4)); // use low numbers of clauses more often int numShouldClauses = 0; boolean onlyShouldClauses = rarely(); for (int i = 0; i < numClauses; i++) { @@ -313,7 +314,7 @@ private BooleanQuery createRandomBooleanQuery(int depth, List fields, Ma numShouldClauses++; } } - builder.setMinimumNumberShouldMatch(numShouldClauses); + builder.setMinimumNumberShouldMatch(randomIntBetween(0, numShouldClauses)); return builder.build(); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 5968f8c3f8327..7bcdcd2e1f695 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -44,6 +44,7 @@ import org.apache.lucene.search.TermInSetQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.join.QueryBitSetProducer; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.spans.SpanFirstQuery; @@ -227,23 +228,87 @@ public void testExtractQueryMetadata_booleanQuery_pre6dot1() { public void testExtractQueryMetadata_booleanQuery_msm() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); builder.setMinimumNumberShouldMatch(2); - TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); + Term term1 = new Term("_field", "_term1"); + TermQuery termQuery1 = new TermQuery(term1); builder.add(termQuery1, BooleanClause.Occur.SHOULD); - TermQuery termQuery2 = new TermQuery(new Term("_field", "_term2")); + Term term2 = new Term("_field", "_term2"); + TermQuery termQuery2 = new TermQuery(term2); builder.add(termQuery2, BooleanClause.Occur.SHOULD); - TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); + Term term3 = new Term("_field", "_term3"); + TermQuery termQuery3 = new TermQuery(term3); builder.add(termQuery3, BooleanClause.Occur.SHOULD); BooleanQuery booleanQuery = builder.build(); Result result = analyze(booleanQuery, Version.CURRENT); assertThat(result.verified, is(true)); assertThat(result.minimumShouldMatch, equalTo(2)); - List extractions = new ArrayList<>(result.extractions); - extractions.sort(Comparator.comparing(extraction -> extraction.term)); - assertThat(extractions.size(), equalTo(3)); - assertThat(extractions.get(0).term, equalTo(new Term("_field", "_term1"))); - assertThat(extractions.get(1).term, equalTo(new Term("_field", "_term2"))); - assertThat(extractions.get(2).term, equalTo(new Term("_field", "_term3"))); + assertTermsEqual(result.extractions, term1, term2, term3); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = 
builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3); + + Term term4 = new Term("_field", "_term4"); + TermQuery termQuery4 = new TermQuery(term4); + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder() + .add(termQuery3, Occur.MUST) + .add(termQuery4, Occur.FILTER) + .build(), Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + assertTermsEqual(result.extractions, term1, term2, term3, term4); + + Term term5 = new Term("_field", "_term5"); + TermQuery termQuery5 = new TermQuery(term5); + builder.add(termQuery5, Occur.SHOULD); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(3)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder.setMinimumNumberShouldMatch(3); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + assertThat(result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(5)); + assertTermsEqual(result.extractions, term1, term2, term3, term4, term5); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(new BooleanQuery.Builder().setMinimumNumberShouldMatch(1).build(), Occur.SHOULD) + .setMinimumNumberShouldMatch(2); + booleanQuery = builder.build(); + result = analyze(booleanQuery, Version.CURRENT); + // ideally it would return no extractions, but the fact + // that it doesn't consider them verified is probably good enough + assertFalse(result.verified); } public void testExtractQueryMetadata_booleanQuery_msm_pre6dot1() { @@ -353,7 +418,7 @@ public void testExactMatch_booleanQuery() { assertThat(result.minimumShouldMatch, equalTo(1)); builder = new BooleanQuery.Builder(); - builder.setMinimumNumberShouldMatch(randomIntBetween(2, 32)); + builder.setMinimumNumberShouldMatch(randomIntBetween(1, 2)); builder.add(termQuery1, BooleanClause.Occur.SHOULD); builder.add(termQuery2, BooleanClause.Occur.SHOULD); result = analyze(builder.build(), Version.CURRENT); @@ -379,6 +444,54 @@ public void testExactMatch_booleanQuery() { result = analyze(builder.build(), Version.CURRENT); assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder(); + builder.add(termQuery1, randomBoolean() ? 
BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); + builder.add(termQuery2, BooleanClause.Occur.MUST_NOT); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.FILTER) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is not a pure disjunction, so candidate matches are not verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Inner clause that is a pure disjunction, so candidate matches are verified", result.verified, is(true)); + assertThat(result.minimumShouldMatch, equalTo(1)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.SHOULD) + .add(termQuery2, Occur.SHOULD) + .build(), Occur.MUST) + .add(termQuery3, Occur.FILTER); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Disjunctions of conjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(2)); + + builder = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(termQuery1, Occur.MUST) + .add(termQuery2, Occur.FILTER) + .build(), Occur.SHOULD) + .add(termQuery3, Occur.SHOULD); + result = analyze(builder.build(), Version.CURRENT); + assertThat("Conjunctions of disjunctions can't be verified", result.verified, is(false)); + assertThat(result.minimumShouldMatch, equalTo(1)); } public void testBooleanQueryWithMustAndShouldClauses() { @@ -564,16 +677,15 @@ public void testExtractQueryMetadata_matchNoDocsQuery() { Result result = analyze(new MatchNoDocsQuery("sometimes there is no reason at all"), Version.CURRENT); assertThat(result.verified, is(true)); assertEquals(0, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); BooleanQuery.Builder bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.MUST); bq.add(new MatchNoDocsQuery("sometimes there is no reason at all"), BooleanClause.Occur.MUST); result = analyze(bq.build(), Version.CURRENT); assertThat(result.verified, is(true)); - assertEquals(1, result.extractions.size()); - assertThat(result.minimumShouldMatch, equalTo(2)); - assertTermsEqual(result.extractions, new Term("field", "value")); + assertEquals(0, result.extractions.size()); + assertThat(result.minimumShouldMatch, equalTo(0)); bq = new BooleanQuery.Builder(); bq.add(new TermQuery(new Term("field", "value")), BooleanClause.Occur.SHOULD); @@ -785,7 +897,7 @@ public void testSynonymQuery() { SynonymQuery query = new SynonymQuery(); Result result = analyze(query, Version.CURRENT); assertThat(result.verified, is(true)); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.isEmpty(), is(true)); query = new SynonymQuery(new 
Term("_field", "_value1"), new Term("_field", "_value2")); @@ -997,7 +1109,7 @@ public void testPointRangeQuery_lowerUpperReversed() { Query query = IntPoint.newRangeQuery("_field", 20, 10); Result result = analyze(query, Version.CURRENT); assertTrue(result.verified); - assertThat(result.minimumShouldMatch, equalTo(1)); + assertThat(result.minimumShouldMatch, equalTo(0)); assertThat(result.extractions.size(), equalTo(0)); } @@ -1179,7 +1291,7 @@ public void testExtractQueryMetadata_duplicatedClauses() { BooleanClause.Occur.SHOULD ); result = analyze(builder.build(), Version.CURRENT); - assertThat(result.verified, is(true)); + assertThat(result.verified, is(false)); assertThat(result.matchAllDocs, is(false)); assertThat(result.minimumShouldMatch, equalTo(2)); assertTermsEqual(result.extractions, new Term("field", "value1"), new Term("field", "value2"), From 569d0c0e897478be4f05a1daba8d217a526e0eeb Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Apr 2018 16:45:25 +0200 Subject: [PATCH 50/68] Improve similarity integration. (#29187) This improves the way similarities are plugged in in order to: - reject the classic similarity on 7.x indices and emit a deprecation warning otherwise - reject unkwown parameters on 7.x indices and emit a deprecation warning otherwise Even though this breaks the plugin API, I'd like to backport to 7.x so that users can get deprecation warnings when they are doing something that will become unsupported in the future. Closes #23208 Closes #29035 --- .../index-modules/similarity.asciidoc | 18 +- .../mapping/params/similarity.asciidoc | 9 +- .../migration/migrate_7_0/mappings.asciidoc | 13 + .../join/query/HasChildQueryBuilderTests.java | 4 +- .../LegacyHasChildQueryBuilderTests.java | 6 +- .../metadata/MetaDataIndexUpgradeService.java | 11 +- .../org/elasticsearch/index/IndexModule.java | 24 +- .../AbstractSimilarityProvider.java | 82 ----- .../similarity/BM25SimilarityProvider.java | 59 ---- .../similarity/BooleanSimilarityProvider.java | 48 --- .../similarity/ClassicSimilarityProvider.java | 52 --- .../similarity/DFISimilarityProvider.java | 79 ----- .../similarity/DFRSimilarityProvider.java | 123 ------- .../similarity/IBSimilarityProvider.java | 113 ------- .../LMDirichletSimilarityProvider.java | 52 --- .../LMJelinekMercerSimilarityProvider.java | 52 --- .../ScriptedSimilarityProvider.java | 19 +- .../index/similarity/SimilarityProvider.java | 36 +-- .../index/similarity/SimilarityProviders.java | 300 ++++++++++++++++++ .../index/similarity/SimilarityService.java | 122 ++++--- .../elasticsearch/index/IndexModuleTests.java | 14 +- .../similarity/SimilarityServiceTests.java | 6 +- .../index/similarity/SimilarityTests.java | 59 +++- .../indices/IndicesServiceTests.java | 8 +- .../similarity/SimilarityIT.java | 8 +- .../index/mapper/FieldTypeTestCase.java | 9 +- 26 files changed, 513 insertions(+), 813 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java delete mode 100644 
server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java delete mode 100644 server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java create mode 100644 server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index d0fd5dd399867..40f7de90c0ab2 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -82,20 +82,6 @@ This similarity has the following options: Type name: `BM25` -[float] -[[classic-similarity]] -==== Classic similarity - -The classic similarity that is based on the TF/IDF model. This -similarity has the following option: - -`discount_overlaps`:: - Determines whether overlap tokens (Tokens with - 0 position increment) are ignored when computing norm. By default this - is true, meaning overlap tokens do not count when computing norms. - -Type name: `classic` - [float] [[dfr]] ==== DFR similarity @@ -541,7 +527,7 @@ PUT /index "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } @@ -563,7 +549,7 @@ PUT /index/_settings "index": { "similarity": { "default": { - "type": "classic" + "type": "boolean" } } } diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 3509cd0cf8eb5..a0be0fb3ccbeb 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -44,13 +44,9 @@ PUT my_index "default_field": { <1> "type": "text" }, - "classic_field": { - "type": "text", - "similarity": "classic" <2> - }, "boolean_sim_field": { "type": "text", - "similarity": "boolean" <3> + "similarity": "boolean" <2> } } } @@ -59,5 +55,4 @@ PUT my_index -------------------------------------------------- // CONSOLE <1> The `default_field` uses the `BM25` similarity. -<2> The `classic_field` uses the `classic` similarity (ie TF/IDF). -<3> The `boolean_sim_field` uses the `boolean` similarity. +<2> The `boolean_sim_field` uses the `boolean` similarity. diff --git a/docs/reference/migration/migrate_7_0/mappings.asciidoc b/docs/reference/migration/migrate_7_0/mappings.asciidoc index 8f1474aa57cbe..b0ab90546c3a8 100644 --- a/docs/reference/migration/migrate_7_0/mappings.asciidoc +++ b/docs/reference/migration/migrate_7_0/mappings.asciidoc @@ -24,3 +24,16 @@ the index setting `index.mapping.nested_objects.limit`. ==== The `update_all_types` option has been removed This option is useless now that all indices have at most one type. + +=== The `classic` similarity has been removed + +The `classic` similarity relied on coordination factors for scoring to be good +in presence of stopwords in the query. This feature has been removed from +Lucene, which means that the `classic` similarity now produces scores of lower +quality. It is advised to switch to `BM25` instead, which is widely accepted +as a better alternative. + +=== Similarities fail when unsupported options are provided + +An error will now be thrown when unknown configuration options are provided +to similarities. Such unknown parameters were ignored before. 
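As a rough illustration of the stricter behaviour described in these migration notes, here is a small hypothetical sketch of version-gated option validation: unknown similarity options are rejected for newly created indices and only produce a deprecation warning for older ones. All names are invented for illustration; this is not the actual SimilarityProviders code.

import java.util.HashSet;
import java.util.Set;

public class SimilarityOptionCheckSketch {

    static void checkOptions(Set<String> provided, Set<String> supported, boolean strictIndex) {
        Set<String> unknown = new HashSet<>(provided);
        unknown.removeAll(supported);
        if (unknown.isEmpty()) {
            return;
        }
        if (strictIndex) {
            // index created on or after the change: fail hard
            throw new IllegalArgumentException("Unknown settings for similarity: " + unknown);
        }
        // older index: keep it working, but surface a deprecation warning
        System.err.println("[deprecation] unknown similarity settings " + unknown
                + " are ignored for this index but will be rejected in a future version");
    }

    public static void main(String[] args) {
        Set<String> bm25Options = Set.of("k1", "b", "discount_overlaps");
        checkOptions(Set.of("k1", "coord"), bm25Options, false); // old index: warns only
        try {
            checkOptions(Set.of("k1", "coord"), bm25Options, true); // new index: rejected
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}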
diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 4f4d965d59433..2d7215c239821 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -336,9 +336,7 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_DOC, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + .apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java index a52cc1db3d088..3eb16a925676c 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/LegacyHasChildQueryBuilderTests.java @@ -87,7 +87,7 @@ protected Collection> getPlugins() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); // TODO: use a single type when inner hits have been changed to work with join field, // this test randomly generates queries with inner hits mapperService.merge(PARENT_TYPE, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(PARENT_TYPE, @@ -323,9 +323,7 @@ public void testNonDefaultSimilarity() throws Exception { hasChildQuery(CHILD_TYPE, new TermQueryBuilder("custom_string", "value"), ScoreMode.None); HasChildQueryBuilder.LateParsingQuery query = (HasChildQueryBuilder.LateParsingQuery) hasChildQueryBuilder.toQuery(shardContext); Similarity expected = SimilarityService.BUILT_IN.get(similarity) - .create(similarity, Settings.EMPTY, - Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), null) - .get(); + .apply(Settings.EMPTY, Version.CURRENT, null); assertThat(((PerFieldSimilarityWrapper) query.getSimilarity()).get("custom_string"), instanceOf(expected.getClass())); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 4b6a898a3a9f1..6d18f5e01b5d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -20,7 +20,9 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.component.AbstractComponent; import 
org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; @@ -31,8 +33,8 @@ import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.script.ScriptService; import java.util.AbstractMap; import java.util.Collection; @@ -142,14 +144,15 @@ private void checkMappingsCompatibility(IndexMetaData indexMetaData) { IndexSettings indexSettings = new IndexSettings(indexMetaData, this.settings); - final Map similarityMap = new AbstractMap() { + final Map> similarityMap + = new AbstractMap>() { @Override public boolean containsKey(Object key) { return true; } @Override - public SimilarityProvider.Factory get(Object key) { + public TriFunction get(Object key) { assert key instanceof String : "key must be a string but was: " + key.getClass(); return SimilarityService.BUILT_IN.get(SimilarityService.DEFAULT_SIMILARITY); } @@ -157,7 +160,7 @@ public SimilarityProvider.Factory get(Object key) { // this entrySet impl isn't fully correct but necessary as SimilarityService will iterate // over all similarities @Override - public Set> entrySet() { + public Set>> entrySet() { return Collections.emptySet(); } }; diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 869f8c9ca72db..767ef48733937 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -19,9 +19,13 @@ package org.elasticsearch.index; +import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -39,9 +43,6 @@ import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesQueryCache; @@ -68,10 +69,10 @@ /** * IndexModule represents the central extension point for index level custom implementations like: *

 * <ul>
- *     <li>{@link SimilarityProvider} - New {@link SimilarityProvider} implementations can be registered through
- *     {@link #addSimilarity(String, SimilarityProvider.Factory)} while existing Providers can be referenced through Settings under the
+ *     <li>{@link Similarity} - New {@link Similarity} implementations can be registered through
+ *     {@link #addSimilarity(String, TriFunction)} while existing Providers can be referenced through Settings under the
 *     {@link IndexModule#SIMILARITY_SETTINGS_PREFIX} prefix along with the "type" value. For example, to reference the
- *     {@link BM25SimilarityProvider}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.</li>
+ *     {@link BM25Similarity}, the configuration "index.similarity.my_similarity.type : "BM25" can be used.</li>
 *     <li>{@link IndexStore} - Custom {@link IndexStore} instances can be registered via {@link #addIndexStore(String, Function)}</li>
 *     <li>{@link IndexEventListener} - Custom {@link IndexEventListener} instances can be registered via
 *     {@link #addIndexEventListener(IndexEventListener)}</li>
@@ -107,7 +108,7 @@ public final class IndexModule { final SetOnce engineFactory = new SetOnce<>(); private SetOnce indexSearcherWrapper = new SetOnce<>(); private final Set indexEventListeners = new HashSet<>(); - private final Map similarities = new HashMap<>(); + private final Map> similarities = new HashMap<>(); private final Map> storeTypes = new HashMap<>(); private final SetOnce> forceQueryCacheProvider = new SetOnce<>(); private final List searchOperationListeners = new ArrayList<>(); @@ -246,12 +247,17 @@ public void addIndexStore(String type, Function provi /** - * Registers the given {@link SimilarityProvider} with the given name + * Registers the given {@link Similarity} with the given name. + * The function takes as parameters:
+     * <ul>
+     *   <li>settings for this similarity
+     *   <li>version of Elasticsearch when the index was created
+     *   <li>ScriptService, for script-based similarities
+     * </ul>
    * * @param name Name of the SimilarityProvider * @param similarity SimilarityProvider to register */ - public void addSimilarity(String name, SimilarityProvider.Factory similarity) { + public void addSimilarity(String name, TriFunction similarity) { ensureNotFrozen(); if (similarities.containsKey(name) || SimilarityService.BUILT_IN.containsKey(name)) { throw new IllegalArgumentException("similarity for name: [" + name + " is already registered"); diff --git a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java deleted file mode 100644 index fef43d6f5deaf..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.NormalizationH1; -import org.apache.lucene.search.similarities.NormalizationH2; -import org.apache.lucene.search.similarities.NormalizationH3; -import org.apache.lucene.search.similarities.NormalizationZ; -import org.elasticsearch.common.settings.Settings; - -/** - * Abstract implementation of {@link SimilarityProvider} providing common behaviour - */ -public abstract class AbstractSimilarityProvider implements SimilarityProvider { - - protected static final Normalization NO_NORMALIZATION = new Normalization.NoNormalization(); - - private final String name; - - /** - * Creates a new AbstractSimilarityProvider with the given name - * - * @param name Name of the Provider - */ - protected AbstractSimilarityProvider(String name) { - this.name = name; - } - - /** - * {@inheritDoc} - */ - @Override - public String name() { - return this.name; - } - - /** - * Parses the given Settings and creates the appropriate {@link Normalization} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Normalization parseNormalization(Settings settings) { - String normalization = settings.get("normalization"); - - if ("no".equals(normalization)) { - return NO_NORMALIZATION; - } else if ("h1".equals(normalization)) { - float c = settings.getAsFloat("normalization.h1.c", 1f); - return new NormalizationH1(c); - } else if ("h2".equals(normalization)) { - float c = settings.getAsFloat("normalization.h2.c", 1f); - return new NormalizationH2(c); - } else if ("h3".equals(normalization)) { - float c = settings.getAsFloat("normalization.h3.c", 800f); - return new NormalizationH3(c); - } else if ("z".equals(normalization)) { - float z = settings.getAsFloat("normalization.z.z", 0.30f); - return new 
NormalizationZ(z); - } else { - throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java deleted file mode 100644 index ad49e7e9cc901..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BM25SimilarityProvider.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BM25Similarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>k1</li>
- *     <li>b</li>
- *     <li>discount_overlaps</li>
- * </ul>
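// Editor's sketch, not part of this patch: the Lucene construction these
// options map onto, using the defaults named above. This mirrors what the new
// SimilarityProviders.createBM25Similarity(settings, version) does; the class
// and method names below are illustrative only.
import org.apache.lucene.search.similarities.BM25Similarity;

class Bm25Sketch {
    static BM25Similarity bm25Defaults() {
        BM25Similarity similarity = new BM25Similarity(1.2f, 0.75f); // k1, b
        similarity.setDiscountOverlaps(true);                        // discount_overlaps
        return similarity;
    }
}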
    - * @see BM25Similarity For more information about configuration - */ -public class BM25SimilarityProvider extends AbstractSimilarityProvider { - - private final BM25Similarity similarity; - - public BM25SimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float k1 = settings.getAsFloat("k1", 1.2f); - float b = settings.getAsFloat("b", 0.75f); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - - this.similarity = new BM25Similarity(k1, b); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java deleted file mode 100644 index e5db045f3716f..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/BooleanSimilarityProvider.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for the {@link BooleanSimilarity}, - * which is a simple similarity that gives terms a score equal - * to their query boost only. This is useful in situations where - * a field does not need to be scored by a full-text ranking - * algorithm, but rather all that matters is whether the query - * terms matched or not. - */ -public class BooleanSimilarityProvider extends AbstractSimilarityProvider { - - private final BooleanSimilarity similarity = new BooleanSimilarity(); - - public BooleanSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - } - - /** - * {@inheritDoc} - */ - @Override - public BooleanSimilarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java deleted file mode 100644 index 419321996a301..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/ClassicSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link ClassicSimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>discount_overlaps</li>
- * </ul>
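// Editor's sketch, not part of this patch: the equivalent Lucene construction,
// mirroring the new SimilarityProviders.createClassicSimilarity(settings, version)
// introduced below (classic is deprecated for 6.x indices and rejected from
// 7.0 on). Names are illustrative only.
import org.apache.lucene.search.similarities.ClassicSimilarity;

class ClassicSketch {
    static ClassicSimilarity classicDefaults() {
        ClassicSimilarity similarity = new ClassicSimilarity();
        similarity.setDiscountOverlaps(true); // discount_overlaps
        return similarity;
    }
}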
    - * @see ClassicSimilarity For more information about configuration - */ -public class ClassicSimilarityProvider extends AbstractSimilarityProvider { - - private final ClassicSimilarity similarity = new ClassicSimilarity(); - - public ClassicSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - /** - * {@inheritDoc} - */ - @Override - public ClassicSimilarity get() { - return similarity; - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java deleted file mode 100644 index 324314b2669b2..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFISimilarityProvider.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.DFISimilarity; -import org.apache.lucene.search.similarities.Independence; -import org.apache.lucene.search.similarities.IndependenceChiSquared; -import org.apache.lucene.search.similarities.IndependenceSaturated; -import org.apache.lucene.search.similarities.IndependenceStandardized; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for the {@link DFISimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>independence_measure</li>
- *     <li>discount_overlaps</li>
- * </ul>
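// Editor's sketch, not part of this patch: the equivalent Lucene construction
// for one of the supported independence measures ("chisquared"), mirroring the
// new SimilarityProviders.createDfiSimilarity(settings, version). Names are
// illustrative only.
import org.apache.lucene.search.similarities.DFISimilarity;
import org.apache.lucene.search.similarities.IndependenceChiSquared;

class DfiSketch {
    static DFISimilarity dfiChiSquared() {
        return new DFISimilarity(new IndependenceChiSquared()); // independence_measure
    }
}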
    - * @see DFISimilarity For more information about configuration - */ -public class DFISimilarityProvider extends AbstractSimilarityProvider { - // the "basic models" of divergence from independence - private static final Map INDEPENDENCE_MEASURES; - static { - Map measures = new HashMap<>(); - measures.put("standardized", new IndependenceStandardized()); - measures.put("saturated", new IndependenceSaturated()); - measures.put("chisquared", new IndependenceChiSquared()); - INDEPENDENCE_MEASURES = unmodifiableMap(measures); - } - - private final DFISimilarity similarity; - - public DFISimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); - Independence measure = parseIndependence(settings); - this.similarity = new DFISimilarity(measure); - this.similarity.setDiscountOverlaps(discountOverlaps); - } - - private Independence parseIndependence(Settings settings) { - String name = settings.get("independence_measure"); - Independence measure = INDEPENDENCE_MEASURES.get(name); - if (measure == null) { - throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "]"); - } - return measure; - } - - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java deleted file mode 100644 index 0d47e86da0182..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.AfterEffect; -import org.apache.lucene.search.similarities.AfterEffectB; -import org.apache.lucene.search.similarities.AfterEffectL; -import org.apache.lucene.search.similarities.BasicModel; -import org.apache.lucene.search.similarities.BasicModelBE; -import org.apache.lucene.search.similarities.BasicModelD; -import org.apache.lucene.search.similarities.BasicModelG; -import org.apache.lucene.search.similarities.BasicModelIF; -import org.apache.lucene.search.similarities.BasicModelIn; -import org.apache.lucene.search.similarities.BasicModelIne; -import org.apache.lucene.search.similarities.BasicModelP; -import org.apache.lucene.search.similarities.DFRSimilarity; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link DFRSimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>basic_model</li>
- *     <li>after_effect</li>
- *     <li>normalization</li>
- * </ul>
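// Editor's sketch, not part of this patch: one valid combination of the three
// listed options (basic_model=g, after_effect=b, normalization=h2), mirroring
// the new SimilarityProviders.createDfrSimilarity(settings, version). Names
// are illustrative only.
import org.apache.lucene.search.similarities.AfterEffectB;
import org.apache.lucene.search.similarities.BasicModelG;
import org.apache.lucene.search.similarities.DFRSimilarity;
import org.apache.lucene.search.similarities.NormalizationH2;

class DfrSketch {
    static DFRSimilarity dfrExample() {
        return new DFRSimilarity(new BasicModelG(),        // basic_model
                                 new AfterEffectB(),       // after_effect
                                 new NormalizationH2(1f)); // normalization (h2, c=1)
    }
}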
    - * @see DFRSimilarity For more information about configuration - */ -public class DFRSimilarityProvider extends AbstractSimilarityProvider { - private static final Map BASIC_MODELS; - private static final Map AFTER_EFFECTS; - - static { - Map models = new HashMap<>(); - models.put("be", new BasicModelBE()); - models.put("d", new BasicModelD()); - models.put("g", new BasicModelG()); - models.put("if", new BasicModelIF()); - models.put("in", new BasicModelIn()); - models.put("ine", new BasicModelIne()); - models.put("p", new BasicModelP()); - BASIC_MODELS = unmodifiableMap(models); - - Map effects = new HashMap<>(); - effects.put("no", new AfterEffect.NoAfterEffect()); - effects.put("b", new AfterEffectB()); - effects.put("l", new AfterEffectL()); - AFTER_EFFECTS = unmodifiableMap(effects); - } - - private final DFRSimilarity similarity; - - public DFRSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - BasicModel basicModel = parseBasicModel(settings); - AfterEffect afterEffect = parseAfterEffect(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new DFRSimilarity(basicModel, afterEffect, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link BasicModel} - * - * @param settings Settings to parse - * @return {@link BasicModel} referred to in the Settings - */ - protected BasicModel parseBasicModel(Settings settings) { - String basicModel = settings.get("basic_model"); - BasicModel model = BASIC_MODELS.get(basicModel); - if (model == null) { - throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); - } - return model; - } - - /** - * Parses the given Settings and creates the appropriate {@link AfterEffect} - * - * @param settings Settings to parse - * @return {@link AfterEffect} referred to in the Settings - */ - protected AfterEffect parseAfterEffect(Settings settings) { - String afterEffect = settings.get("after_effect"); - AfterEffect effect = AFTER_EFFECTS.get(afterEffect); - if (effect == null) { - throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); - } - return effect; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java deleted file mode 100644 index a43276bbfaa82..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.Distribution; -import org.apache.lucene.search.similarities.DistributionLL; -import org.apache.lucene.search.similarities.DistributionSPL; -import org.apache.lucene.search.similarities.IBSimilarity; -import org.apache.lucene.search.similarities.Lambda; -import org.apache.lucene.search.similarities.LambdaDF; -import org.apache.lucene.search.similarities.LambdaTTF; -import org.apache.lucene.search.similarities.Normalization; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * {@link SimilarityProvider} for {@link IBSimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>distribution</li>
- *     <li>lambda</li>
- *     <li>normalization</li>
- * </ul>
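// Editor's sketch, not part of this patch: one valid combination of the listed
// options (distribution=spl, lambda=ttf, normalization=h2), mirroring the new
// SimilarityProviders.createIBSimilarity(settings, version). Names are
// illustrative only.
import org.apache.lucene.search.similarities.DistributionSPL;
import org.apache.lucene.search.similarities.IBSimilarity;
import org.apache.lucene.search.similarities.LambdaTTF;
import org.apache.lucene.search.similarities.NormalizationH2;

class IbSketch {
    static IBSimilarity ibExample() {
        return new IBSimilarity(new DistributionSPL(),    // distribution
                                new LambdaTTF(),          // lambda
                                new NormalizationH2(1f)); // normalization (h2, c=1)
    }
}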
    - * @see IBSimilarity For more information about configuration - */ -public class IBSimilarityProvider extends AbstractSimilarityProvider { - - private static final Map DISTRIBUTIONS; - private static final Map LAMBDAS; - - static { - Map distributions = new HashMap<>(); - distributions.put("ll", new DistributionLL()); - distributions.put("spl", new DistributionSPL()); - DISTRIBUTIONS = unmodifiableMap(distributions); - - Map lamdas = new HashMap<>(); - lamdas.put("df", new LambdaDF()); - lamdas.put("ttf", new LambdaTTF()); - LAMBDAS = unmodifiableMap(lamdas); - } - - private final IBSimilarity similarity; - - public IBSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - Distribution distribution = parseDistribution(settings); - Lambda lambda = parseLambda(settings); - Normalization normalization = parseNormalization(settings); - this.similarity = new IBSimilarity(distribution, lambda, normalization); - } - - /** - * Parses the given Settings and creates the appropriate {@link Distribution} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Distribution parseDistribution(Settings settings) { - String rawDistribution = settings.get("distribution"); - Distribution distribution = DISTRIBUTIONS.get(rawDistribution); - if (distribution == null) { - throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); - } - return distribution; - } - - /** - * Parses the given Settings and creates the appropriate {@link Lambda} - * - * @param settings Settings to parse - * @return {@link Normalization} referred to in the Settings - */ - protected Lambda parseLambda(Settings settings) { - String rawLambda = settings.get("lambda"); - Lambda lambda = LAMBDAS.get(rawLambda); - if (lambda == null) { - throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); - } - return lambda; - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java deleted file mode 100644 index 170a7e42133c9..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMDirichletSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMDirichletSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMDirichletSimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>mu</li>
- * </ul>
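// Editor's sketch, not part of this patch: the equivalent Lucene construction
// with the default mu of 2000, mirroring the new
// SimilarityProviders.createLMDirichletSimilarity(settings, version). Names
// are illustrative only.
import org.apache.lucene.search.similarities.LMDirichletSimilarity;

class LmDirichletSketch {
    static LMDirichletSimilarity lmDirichletDefaults() {
        return new LMDirichletSimilarity(2000f); // mu
    }
}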
    - * @see LMDirichletSimilarity For more information about configuration - */ -public class LMDirichletSimilarityProvider extends AbstractSimilarityProvider { - - private final LMDirichletSimilarity similarity; - - public LMDirichletSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float mu = settings.getAsFloat("mu", 2000f); - this.similarity = new LMDirichletSimilarity(mu); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java deleted file mode 100644 index 2ee04b78ec2ef..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/similarity/LMJelinekMercerSimilarityProvider.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.similarity; - -import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; -import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; - -/** - * {@link SimilarityProvider} for {@link LMJelinekMercerSimilarity}. - *
- * Configuration options available:
- * <ul>
- *     <li>lambda</li>
- * </ul>
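// Editor's sketch, not part of this patch: the equivalent Lucene construction
// with the default lambda of 0.1, mirroring the new
// SimilarityProviders.createLMJelinekMercerSimilarity(settings, version).
// Names are illustrative only.
import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity;

class LmJelinekMercerSketch {
    static LMJelinekMercerSimilarity lmJelinekMercerDefaults() {
        return new LMJelinekMercerSimilarity(0.1f); // lambda
    }
}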
    - * @see LMJelinekMercerSimilarity For more information about configuration - */ -public class LMJelinekMercerSimilarityProvider extends AbstractSimilarityProvider { - - private final LMJelinekMercerSimilarity similarity; - - public LMJelinekMercerSimilarityProvider(String name, Settings settings, Settings indexSettings) { - super(name); - float lambda = settings.getAsFloat("lambda", 0.1f); - this.similarity = new LMJelinekMercerSimilarity(lambda); - } - - /** - * {@inheritDoc} - */ - @Override - public Similarity get() { - return similarity; - } -} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java index e290fd3457aeb..190f861f26216 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/ScriptedSimilarityProvider.java @@ -20,6 +20,8 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; @@ -27,13 +29,11 @@ import org.elasticsearch.script.SimilarityWeightScript; /** Provider of scripted similarities. */ -public class ScriptedSimilarityProvider extends AbstractSimilarityProvider { +final class ScriptedSimilarityProvider implements TriFunction { - private final ScriptedSimilarity scriptedSimilarity; - - public ScriptedSimilarityProvider(String name, Settings settings, Settings indexSettings, ScriptService scriptService) { - super(name); - boolean discountOverlaps = settings.getAsBoolean("discount_overlaps", true); + @Override + public Similarity apply(Settings settings, Version indexCreatedVersion, ScriptService scriptService) { + boolean discountOverlaps = settings.getAsBoolean(SimilarityProviders.DISCOUNT_OVERLAPS, true); Settings scriptSettings = settings.getAsSettings("script"); Script script = Script.parse(scriptSettings); SimilarityScript.Factory scriptFactory = scriptService.compile(script, SimilarityScript.CONTEXT); @@ -44,15 +44,10 @@ public ScriptedSimilarityProvider(String name, Settings settings, Settings index weightScript = Script.parse(weightScriptSettings); weightScriptFactory = scriptService.compile(weightScript, SimilarityWeightScript.CONTEXT); } - scriptedSimilarity = new ScriptedSimilarity( + return new ScriptedSimilarity( weightScript == null ? null : weightScript.toString(), weightScriptFactory == null ? 
null : weightScriptFactory::newInstance, script.toString(), scriptFactory::newInstance, discountOverlaps); } - @Override - public Similarity get() { - return scriptedSimilarity; - } - } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java index 666e70c406937..fed15b3058360 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProvider.java @@ -20,32 +20,32 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.Similarity; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.ScriptService; /** - * Provider for {@link Similarity} instances + * Wrapper around a {@link Similarity} and its name. */ -public interface SimilarityProvider { +public final class SimilarityProvider { + + private final String name; + private final Similarity similarity; + + public SimilarityProvider(String name, Similarity similarity) { + this.name = name; + this.similarity = similarity; + } /** - * Returns the name associated with the Provider - * - * @return Name of the Provider + * Return the name of this {@link Similarity}. */ - String name(); + public String name() { + return name; + } /** - * Returns the {@link Similarity} the Provider is for - * - * @return Provided {@link Similarity} + * Return the wrapped {@link Similarity}. */ - Similarity get(); - - /** Factory of {@link SimilarityProvider} */ - @FunctionalInterface - interface Factory { - /** Create a new {@link SimilarityProvider}. */ - SimilarityProvider create(String name, Settings settings, Settings indexSettings, ScriptService scriptService); + public Similarity get() { + return similarity; } + } diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java new file mode 100644 index 0000000000000..18c6d6a3fc063 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityProviders.java @@ -0,0 +1,300 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.similarity; + +import org.apache.lucene.search.similarities.AfterEffect; +import org.apache.lucene.search.similarities.AfterEffectB; +import org.apache.lucene.search.similarities.AfterEffectL; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BasicModel; +import org.apache.lucene.search.similarities.BasicModelBE; +import org.apache.lucene.search.similarities.BasicModelD; +import org.apache.lucene.search.similarities.BasicModelG; +import org.apache.lucene.search.similarities.BasicModelIF; +import org.apache.lucene.search.similarities.BasicModelIn; +import org.apache.lucene.search.similarities.BasicModelIne; +import org.apache.lucene.search.similarities.BasicModelP; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.DFISimilarity; +import org.apache.lucene.search.similarities.DFRSimilarity; +import org.apache.lucene.search.similarities.Distribution; +import org.apache.lucene.search.similarities.DistributionLL; +import org.apache.lucene.search.similarities.DistributionSPL; +import org.apache.lucene.search.similarities.IBSimilarity; +import org.apache.lucene.search.similarities.Independence; +import org.apache.lucene.search.similarities.IndependenceChiSquared; +import org.apache.lucene.search.similarities.IndependenceSaturated; +import org.apache.lucene.search.similarities.IndependenceStandardized; +import org.apache.lucene.search.similarities.LMDirichletSimilarity; +import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; +import org.apache.lucene.search.similarities.Lambda; +import org.apache.lucene.search.similarities.LambdaDF; +import org.apache.lucene.search.similarities.LambdaTTF; +import org.apache.lucene.search.similarities.Normalization; +import org.apache.lucene.search.similarities.NormalizationH1; +import org.apache.lucene.search.similarities.NormalizationH2; +import org.apache.lucene.search.similarities.NormalizationH3; +import org.apache.lucene.search.similarities.NormalizationZ; +import org.elasticsearch.Version; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Settings; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static java.util.Collections.unmodifiableMap; + +final class SimilarityProviders { + + private SimilarityProviders() {} // no instantiation + + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityProviders.class)); + static final String DISCOUNT_OVERLAPS = "discount_overlaps"; + + private static final Map BASIC_MODELS; + private static final Map AFTER_EFFECTS; + + static { + Map models = new HashMap<>(); + models.put("be", new BasicModelBE()); + models.put("d", new BasicModelD()); + models.put("g", new BasicModelG()); + models.put("if", new BasicModelIF()); + models.put("in", new BasicModelIn()); + models.put("ine", new BasicModelIne()); + models.put("p", new BasicModelP()); + BASIC_MODELS = unmodifiableMap(models); + + Map effects = new HashMap<>(); + effects.put("no", new AfterEffect.NoAfterEffect()); + effects.put("b", new AfterEffectB()); + effects.put("l", new AfterEffectL()); + AFTER_EFFECTS = unmodifiableMap(effects); + } + + private static final Map INDEPENDENCE_MEASURES; + 
static { + Map measures = new HashMap<>(); + measures.put("standardized", new IndependenceStandardized()); + measures.put("saturated", new IndependenceSaturated()); + measures.put("chisquared", new IndependenceChiSquared()); + INDEPENDENCE_MEASURES = unmodifiableMap(measures); + } + + private static final Map DISTRIBUTIONS; + private static final Map LAMBDAS; + + static { + Map distributions = new HashMap<>(); + distributions.put("ll", new DistributionLL()); + distributions.put("spl", new DistributionSPL()); + DISTRIBUTIONS = unmodifiableMap(distributions); + + Map lamdas = new HashMap<>(); + lamdas.put("df", new LambdaDF()); + lamdas.put("ttf", new LambdaTTF()); + LAMBDAS = unmodifiableMap(lamdas); + } + + /** + * Parses the given Settings and creates the appropriate {@link BasicModel} + * + * @param settings Settings to parse + * @return {@link BasicModel} referred to in the Settings + */ + private static BasicModel parseBasicModel(Settings settings) { + String basicModel = settings.get("basic_model"); + BasicModel model = BASIC_MODELS.get(basicModel); + if (model == null) { + throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "], expected one of " + BASIC_MODELS.keySet()); + } + return model; + } + + /** + * Parses the given Settings and creates the appropriate {@link AfterEffect} + * + * @param settings Settings to parse + * @return {@link AfterEffect} referred to in the Settings + */ + private static AfterEffect parseAfterEffect(Settings settings) { + String afterEffect = settings.get("after_effect"); + AfterEffect effect = AFTER_EFFECTS.get(afterEffect); + if (effect == null) { + throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "], expected one of " + AFTER_EFFECTS.keySet()); + } + return effect; + } + + /** + * Parses the given Settings and creates the appropriate {@link Normalization} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Normalization parseNormalization(Settings settings) { + String normalization = settings.get("normalization"); + + if ("no".equals(normalization)) { + return new Normalization.NoNormalization(); + } else if ("h1".equals(normalization)) { + float c = settings.getAsFloat("normalization.h1.c", 1f); + return new NormalizationH1(c); + } else if ("h2".equals(normalization)) { + float c = settings.getAsFloat("normalization.h2.c", 1f); + return new NormalizationH2(c); + } else if ("h3".equals(normalization)) { + float c = settings.getAsFloat("normalization.h3.c", 800f); + return new NormalizationH3(c); + } else if ("z".equals(normalization)) { + float z = settings.getAsFloat("normalization.z.z", 0.30f); + return new NormalizationZ(z); + } else { + throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); + } + } + + private static Independence parseIndependence(Settings settings) { + String name = settings.get("independence_measure"); + Independence measure = INDEPENDENCE_MEASURES.get(name); + if (measure == null) { + throw new IllegalArgumentException("Unsupported IndependenceMeasure [" + name + "], expected one of " + + INDEPENDENCE_MEASURES.keySet()); + } + return measure; + } + + /** + * Parses the given Settings and creates the appropriate {@link Distribution} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Distribution parseDistribution(Settings settings) { + String rawDistribution = settings.get("distribution"); + 
Distribution distribution = DISTRIBUTIONS.get(rawDistribution); + if (distribution == null) { + throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); + } + return distribution; + } + + /** + * Parses the given Settings and creates the appropriate {@link Lambda} + * + * @param settings Settings to parse + * @return {@link Normalization} referred to in the Settings + */ + private static Lambda parseLambda(Settings settings) { + String rawLambda = settings.get("lambda"); + Lambda lambda = LAMBDAS.get(rawLambda); + if (lambda == null) { + throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); + } + return lambda; + } + + static void assertSettingsIsSubsetOf(String type, Version version, Settings settings, String... supportedSettings) { + Set unknownSettings = new HashSet<>(settings.keySet()); + unknownSettings.removeAll(Arrays.asList(supportedSettings)); + unknownSettings.remove("type"); // used to figure out which sim this is + if (unknownSettings.isEmpty() == false) { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + } else { + DEPRECATION_LOGGER.deprecated("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); + } + } + } + + public static BM25Similarity createBM25Similarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("BM25", indexCreatedVersion, settings, "k1", "b", DISCOUNT_OVERLAPS); + + float k1 = settings.getAsFloat("k1", 1.2f); + float b = settings.getAsFloat("b", 0.75f); + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + BM25Similarity similarity = new BM25Similarity(k1, b); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static BooleanSimilarity createBooleanSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("boolean", indexCreatedVersion, settings); + return new BooleanSimilarity(); + } + + public static ClassicSimilarity createClassicSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("classic", indexCreatedVersion, settings, DISCOUNT_OVERLAPS); + + boolean discountOverlaps = settings.getAsBoolean(DISCOUNT_OVERLAPS, true); + + ClassicSimilarity similarity = new ClassicSimilarity(); + similarity.setDiscountOverlaps(discountOverlaps); + return similarity; + } + + public static DFRSimilarity createDfrSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFR", indexCreatedVersion, settings, + "basic_model", "after_effect", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + + return new DFRSimilarity( + parseBasicModel(settings), + parseAfterEffect(settings), + parseNormalization(settings)); + } + + public static DFISimilarity createDfiSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("DFI", indexCreatedVersion, settings, "independence_measure"); + + return new DFISimilarity(parseIndependence(settings)); + } + + public static IBSimilarity createIBSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("IB", indexCreatedVersion, settings, "distribution", "lambda", "normalization", + "normalization.h1.c", "normalization.h2.c", "normalization.h3.c", "normalization.z.z"); + + return new IBSimilarity( + parseDistribution(settings), + parseLambda(settings), + 
parseNormalization(settings)); + } + + public static LMDirichletSimilarity createLMDirichletSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LMDirichlet", indexCreatedVersion, settings, "mu"); + + float mu = settings.getAsFloat("mu", 2000f); + return new LMDirichletSimilarity(mu); + } + + public static LMJelinekMercerSimilarity createLMJelinekMercerSimilarity(Settings settings, Version indexCreatedVersion) { + assertSettingsIsSubsetOf("LMJelinekMercer", indexCreatedVersion, settings, "lambda"); + + float lambda = settings.getAsFloat("lambda", 0.1f); + return new LMJelinekMercerSimilarity(lambda); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java index 16afb55599d49..eaed2169f11c0 100644 --- a/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/elasticsearch/index/similarity/SimilarityService.java @@ -19,8 +19,13 @@ package org.elasticsearch.index.similarity; +import org.apache.lucene.search.similarities.BM25Similarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -34,45 +39,84 @@ import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Function; +import java.util.function.Supplier; public final class SimilarityService extends AbstractIndexComponent { private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(SimilarityService.class)); public static final String DEFAULT_SIMILARITY = "BM25"; - private final Similarity defaultSimilarity; - private final Map similarities; - private static final Map DEFAULTS; - public static final Map BUILT_IN; + private static final String CLASSIC_SIMILARITY = "classic"; + private static final Map>> DEFAULTS; + public static final Map> BUILT_IN; static { - Map defaults = new HashMap<>(); - defaults.put("classic", - (name, settings, indexSettings, scriptService) -> new ClassicSimilarityProvider(name, settings, indexSettings)); - defaults.put("BM25", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); - defaults.put("boolean", - (name, settings, indexSettings, scriptService) -> new BooleanSimilarityProvider(name, settings, indexSettings)); - - Map builtIn = new HashMap<>(defaults); + Map>> defaults = new HashMap<>(); + defaults.put(CLASSIC_SIMILARITY, version -> { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + return () -> { + throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead."); + }; + } else { + final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); + return () -> { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. 
Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return similarity; + }; + } + }); + defaults.put("BM25", version -> { + final BM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); + return () -> similarity; + }); + defaults.put("boolean", version -> { + final Similarity similarity = new BooleanSimilarity(); + return () -> similarity; + }); + + Map> builtIn = new HashMap<>(); + builtIn.put(CLASSIC_SIMILARITY, + (settings, version, script) -> { + if (version.onOrAfter(Version.V_7_0_0_alpha1)) { + throw new IllegalArgumentException("The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead."); + } else { + DEPRECATION_LOGGER.deprecated("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); + return SimilarityProviders.createClassicSimilarity(settings, version); + } + }); + builtIn.put("BM25", + (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); + builtIn.put("boolean", + (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); builtIn.put("DFR", - (name, settings, indexSettings, scriptService) -> new DFRSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createDfrSimilarity(settings, version)); builtIn.put("IB", - (name, settings, indexSettings, scriptService) -> new IBSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createIBSimilarity(settings, version)); builtIn.put("LMDirichlet", - (name, settings, indexSettings, scriptService) -> new LMDirichletSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMDirichletSimilarity(settings, version)); builtIn.put("LMJelinekMercer", - (name, settings, indexSettings, scriptService) -> new LMJelinekMercerSimilarityProvider(name, settings, indexSettings)); + (settings, version, scriptService) -> SimilarityProviders.createLMJelinekMercerSimilarity(settings, version)); builtIn.put("DFI", - (name, settings, indexSettings, scriptService) -> new DFISimilarityProvider(name, settings, indexSettings)); - builtIn.put("scripted", ScriptedSimilarityProvider::new); + (settings, version, scriptService) -> SimilarityProviders.createDfiSimilarity(settings, version)); + builtIn.put("scripted", new ScriptedSimilarityProvider()); DEFAULTS = Collections.unmodifiableMap(defaults); BUILT_IN = Collections.unmodifiableMap(builtIn); } + private final Similarity defaultSimilarity; + private final Map> similarities; + public SimilarityService(IndexSettings indexSettings, ScriptService scriptService, - Map similarities) { + Map> similarities) { super(indexSettings); - Map providers = new HashMap<>(similarities.size()); + Map> providers = new HashMap<>(similarities.size()); Map similaritySettings = this.indexSettings.getSettings().getGroups(IndexModule.SIMILARITY_SETTINGS_PREFIX); + for (Map.Entry entry : similaritySettings.entrySet()) { String name = entry.getKey(); if (BUILT_IN.containsKey(name)) { @@ -85,14 +129,13 @@ public SimilarityService(IndexSettings indexSettings, ScriptService scriptServic } else if ((similarities.containsKey(typeName) || BUILT_IN.containsKey(typeName)) == false) { throw 
new IllegalArgumentException("Unknown Similarity type [" + typeName + "] for [" + name + "]"); } - SimilarityProvider.Factory defaultFactory = BUILT_IN.get(typeName); - SimilarityProvider.Factory factory = similarities.getOrDefault(typeName, defaultFactory); - providers.put(name, factory.create(name, providerSettings, indexSettings.getSettings(), scriptService)); + TriFunction defaultFactory = BUILT_IN.get(typeName); + TriFunction factory = similarities.getOrDefault(typeName, defaultFactory); + final Similarity similarity = factory.apply(providerSettings, indexSettings.getIndexVersionCreated(), scriptService); + providers.put(name, () -> similarity); } - Map providerMapping = addSimilarities(similaritySettings, indexSettings.getSettings(), scriptService, - DEFAULTS); - for (Map.Entry entry : providerMapping.entrySet()) { - providers.put(entry.getKey(), entry.getValue()); + for (Map.Entry>> entry : DEFAULTS.entrySet()) { + providers.put(entry.getKey(), entry.getValue().apply(indexSettings.getIndexVersionCreated())); } this.similarities = providers; defaultSimilarity = (providers.get("default") != null) ? providers.get("default").get() @@ -108,25 +151,16 @@ public Similarity similarity(MapperService mapperService) { defaultSimilarity; } - private Map addSimilarities(Map similaritySettings, Settings indexSettings, - ScriptService scriptService, Map similarities) { - Map providers = new HashMap<>(similarities.size()); - for (Map.Entry entry : similarities.entrySet()) { - String name = entry.getKey(); - SimilarityProvider.Factory factory = entry.getValue(); - Settings providerSettings = similaritySettings.get(name); - if (providerSettings == null) { - providerSettings = Settings.Builder.EMPTY_SETTINGS; - } - providers.put(name, factory.create(name, providerSettings, indexSettings, scriptService)); - } - return providers; - } - + public SimilarityProvider getSimilarity(String name) { - return similarities.get(name); + Supplier sim = similarities.get(name); + if (sim == null) { + return null; + } + return new SimilarityProvider(name, sim.get()); } + // for testing Similarity getDefaultSimilarity() { return defaultSimilarity; } diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 706421c5ce73a..dde9c1ca3bdb6 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -59,7 +59,6 @@ import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.IndicesModule; @@ -287,17 +286,8 @@ public void testAddSimilarity() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), emptyAnalysisRegistry); - module.addSimilarity("test_similarity", (string, providerSettings, indexLevelSettings, scriptService) -> new SimilarityProvider() { - @Override - public String name() { - return string; - } - - @Override - public Similarity get() { - return new TestSimilarity(providerSettings.get("key")); - } - }); + module.addSimilarity("test_similarity", + (providerSettings, 
indexCreatedVersion, scriptService) -> new TestSimilarity(providerSettings.get("key"))); IndexService indexService = newIndexService(module); SimilarityService similarityService = indexService.similarityService(); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index ed219c972b614..5d18a595e9687 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.BM25Similarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.BooleanSimilarity; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -50,10 +50,10 @@ public void testOverrideBuiltInSimilarity() { } public void testOverrideDefaultSimilarity() { - Settings settings = Settings.builder().put("index.similarity.default.type", "classic") + Settings settings = Settings.builder().put("index.similarity.default.type", "boolean") .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, null, Collections.emptyMap()); - assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity); + assertTrue(service.getDefaultSimilarity() instanceof BooleanSimilarity); } } diff --git a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index 2ab905a2dd526..3de02f6831837 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -33,6 +33,8 @@ import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -60,7 +62,24 @@ protected Collection> getPlugins() { public void testResolveDefaultSimilarities() { SimilarityService similarityService = createIndex("foo").similarityService(); + assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); + assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); + assertThat(similarityService.getSimilarity("default"), equalTo(null)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> similarityService.getSimilarity("classic")); + assertEquals("The [classic] similarity may not be used anymore. 
Please use the [BM25] similarity or build a custom [scripted] " + + "similarity instead.", e.getMessage()); + } + + public void testResolveDefaultSimilaritiesOn6xIndex() { + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden + .build(); + SimilarityService similarityService = createIndex("foo", indexSettings).similarityService(); assertThat(similarityService.getSimilarity("classic").get(), instanceOf(ClassicSimilarity.class)); + assertWarnings("The [classic] similarity is now deprecated in favour of BM25, which is generally " + + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " + + "instead."); assertThat(similarityService.getSimilarity("BM25").get(), instanceOf(BM25Similarity.class)); assertThat(similarityService.getSimilarity("boolean").get(), instanceOf(BooleanSimilarity.class)); assertThat(similarityService.getSimilarity("default"), equalTo(null)); @@ -76,15 +95,27 @@ public void testResolveSimilaritiesFromMapping_classic() throws IOException { Settings indexSettings = Settings.builder() .put("index.similarity.my_similarity.type", "classic") .put("index.similarity.my_similarity.discount_overlaps", false) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_6_3_0) // otherwise classic is forbidden .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(ClassicSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(ClassicSimilarity.class)); ClassicSimilarity similarity = (ClassicSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDiscountOverlaps(), equalTo(false)); } + public void testResolveSimilaritiesFromMapping_classicIsForbidden() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.similarity.my_similarity.type", "classic") + .put("index.similarity.my_similarity.discount_overlaps", false) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> createIndex("foo", indexSettings)); + assertEquals("The [classic] similarity may not be used anymore. 
Please use the [BM25] similarity or build a custom [scripted] " + + "similarity instead.", e.getMessage()); + } + public void testResolveSimilaritiesFromMapping_bm25() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties") @@ -100,7 +131,7 @@ public void testResolveSimilaritiesFromMapping_bm25() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(BM25SimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(BM25Similarity.class)); BM25Similarity similarity = (BM25Similarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getK1(), equalTo(2.0f)); @@ -119,8 +150,8 @@ public void testResolveSimilaritiesFromMapping_boolean() throws IOException { DocumentMapper documentMapper = indexService.mapperService() .documentMapperParser() .parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), - instanceOf(BooleanSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), + instanceOf(BooleanSimilarity.class)); } public void testResolveSimilaritiesFromMapping_DFR() throws IOException { @@ -139,7 +170,7 @@ public void testResolveSimilaritiesFromMapping_DFR() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(DFRSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(DFRSimilarity.class)); DFRSimilarity similarity = (DFRSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getBasicModel(), instanceOf(BasicModelG.class)); @@ -164,7 +195,7 @@ public void testResolveSimilaritiesFromMapping_IB() throws IOException { .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(IBSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(IBSimilarity.class)); IBSimilarity similarity = (IBSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getDistribution(), instanceOf(DistributionSPL.class)); @@ -187,7 +218,7 @@ public void testResolveSimilaritiesFromMapping_DFI() throws IOException { IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); MappedFieldType fieldType = documentMapper.mappers().getMapper("field1").fieldType(); - assertThat(fieldType.similarity(), instanceOf(DFISimilarityProvider.class)); + 
assertThat(fieldType.similarity().get(), instanceOf(DFISimilarity.class)); DFISimilarity similarity = (DFISimilarity) fieldType.similarity().get(); assertThat(similarity.getIndependence(), instanceOf(IndependenceChiSquared.class)); } @@ -205,7 +236,7 @@ public void testResolveSimilaritiesFromMapping_LMDirichlet() throws IOException .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMDirichletSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMDirichletSimilarity.class)); LMDirichletSimilarity similarity = (LMDirichletSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getMu(), equalTo(3000f)); @@ -224,7 +255,7 @@ public void testResolveSimilaritiesFromMapping_LMJelinekMercer() throws IOExcept .build(); IndexService indexService = createIndex("foo", indexSettings); DocumentMapper documentMapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(LMJelinekMercerSimilarityProvider.class)); + assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().get(), instanceOf(LMJelinekMercerSimilarity.class)); LMJelinekMercerSimilarity similarity = (LMJelinekMercerSimilarity) documentMapper.mappers().getMapper("field1").fieldType().similarity().get(); assertThat(similarity.getLambda(), equalTo(0.7f)); @@ -245,4 +276,14 @@ public void testResolveSimilaritiesFromMapping_Unknown() throws IOException { assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]")); } } + + public void testUnknownParameters() throws IOException { + Settings indexSettings = Settings.builder() + .put("index.similarity.my_similarity.type", "BM25") + .put("index.similarity.my_similarity.z", 2.0f) + .build(); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> createIndex("foo", indexSettings)); + assertEquals("Unknown settings for similarity of type [BM25]: [z]", e.getMessage()); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 7cef608850e11..46d7311a90e23 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.indices; +import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -49,7 +50,6 @@ import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.indices.IndicesService.ShardDeletionCheckResult; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; @@ -106,7 +106,7 @@ public Map getMappers() { public void onIndexModule(IndexModule indexModule) { 
super.onIndexModule(indexModule); indexModule.addSimilarity("fake-similarity", - (name, settings, indexSettings, scriptService) -> new BM25SimilarityProvider(name, settings, indexSettings)); + (settings, indexCreatedVersion, scriptService) -> new BM25Similarity()); } } @@ -375,8 +375,8 @@ public void testStandAloneMapperServiceWithPlugins() throws IOException { .build(); MapperService mapperService = indicesService.createIndexMapperService(indexMetaData); assertNotNull(mapperService.documentMapperParser().parserContext("type").typeParser("fake-mapper")); - assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test"), - instanceOf(BM25SimilarityProvider.class)); + assertThat(mapperService.documentMapperParser().parserContext("type").getSimilarity("test").get(), + instanceOf(BM25Similarity.class)); } public void testStatsByShardDoesNotDieFromExpectedExceptions() { diff --git a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java index c925e46cfa048..35e5b7071872b 100644 --- a/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java +++ b/server/src/test/java/org/elasticsearch/similarity/SimilarityIT.java @@ -46,7 +46,7 @@ public void testCustomBM25Similarity() throws Exception { .field("type", "text") .endObject() .startObject("field2") - .field("similarity", "classic") + .field("similarity", "boolean") .field("type", "text") .endObject() .endObject() @@ -68,9 +68,9 @@ public void testCustomBM25Similarity() throws Exception { assertThat(bm25SearchResponse.getHits().getTotalHits(), equalTo(1L)); float bm25Score = bm25SearchResponse.getHits().getHits()[0].getScore(); - SearchResponse defaultSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); - assertThat(defaultSearchResponse.getHits().getTotalHits(), equalTo(1L)); - float defaultScore = defaultSearchResponse.getHits().getHits()[0].getScore(); + SearchResponse booleanSearchResponse = client().prepareSearch().setQuery(matchQuery("field2", "quick brown fox")).execute().actionGet(); + assertThat(booleanSearchResponse.getHits().getTotalHits(), equalTo(1L)); + float defaultScore = booleanSearchResponse.getHits().getHits()[0].getScore(); assertThat(bm25Score, not(equalTo(defaultScore))); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index 818594d3bf7fd..28767cb34d73b 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -20,13 +20,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.search.Query; +import org.apache.lucene.search.similarities.BM25Similarity; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.analysis.AnalyzerScope; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.similarity.BM25SimilarityProvider; +import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; @@ -123,17 +124,17 @@ public void normalizeOther(MappedFieldType other) { new Modifier("similarity", 
false) {
             @Override
             public void modify(MappedFieldType ft) {
-                ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS));
+                ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity()));
             }
         },
         new Modifier("similarity", false) {
             @Override
             public void modify(MappedFieldType ft) {
-                ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY, INDEX_SETTINGS));
+                ft.setSimilarity(new SimilarityProvider("foo", new BM25Similarity()));
             }
             @Override
             public void normalizeOther(MappedFieldType other) {
-                other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY, INDEX_SETTINGS));
+                other.setSimilarity(new SimilarityProvider("bar", new BM25Similarity()));
             }
         },
         new Modifier("eager_global_ordinals", true) {

From 4db6fc9a08f66ec74c8a39dcadb3a4d11cc13368 Mon Sep 17 00:00:00 2001
From: Johnny Marnell
Date: Tue, 3 Apr 2018 11:22:04 -0400
Subject: [PATCH 51/68] Reindex: Fix error in delete-by-query rest spec (#29318)

---
 .../main/resources/rest-api-spec/api/delete_by_query.json | 6 +++---
 .../main/resources/rest-api-spec/api/update_by_query.json | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
index 8ed3202e9af81..4c7c3240dc29a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_by_query.json
@@ -53,7 +53,7 @@
         "type" : "enum",
         "options": ["abort", "proceed"],
         "default": "abort",
-        "description" : "What to do when the delete-by-query hits version conflicts?"
+        "description" : "What to do when the delete by query hits version conflicts?"
       },
       "expand_wildcards": {
         "type" : "enum",
@@ -142,12 +142,12 @@
       "scroll_size": {
         "type": "number",
         "defaut_value": 100,
-        "description": "Size on the scroll request powering the update_by_query"
+        "description": "Size on the scroll request powering the delete by query"
       },
       "wait_for_completion": {
         "type" : "boolean",
         "default": true,
-        "description" : "Should the request should block until the delete-by-query is complete."
+        "description" : "Should the request block until the delete by query is complete."
       },
       "requests_per_second": {
         "type": "number",
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
index 072e950686aa2..3e77f7cd145f5 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/update_by_query.json
@@ -150,7 +150,7 @@
       "scroll_size": {
         "type": "number",
         "defaut_value": 100,
-        "description": "Size on the scroll request powering the update_by_query"
+        "description": "Size on the scroll request powering the update by query"
       },
       "wait_for_completion": {
         "type" : "boolean",

From 7c6d5cbf1fd7e43667cd5e796fe106694016f026 Mon Sep 17 00:00:00 2001
From: Uwe Schindler
Date: Tue, 3 Apr 2018 19:22:12 +0200
Subject: [PATCH 52/68] Build: Fix Java9 MR build (#29312)

Correctly set up the classpath/dependencies and fix the checkstyle task that
was partly broken because of the delayed setup of the Java9 source sets. This
also cleans up the packaging of META-INF. It also prepares the forbiddenapis
2.6 upgrade. Relates #29292
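For readers unfamiliar with multi-release jars, the effect of the jar change
can be checked programmatically. The following is a minimal, illustrative
sketch (the jar path is hypothetical and the snippet is not part of this
change):

    import java.io.File;
    import java.util.jar.JarFile;
    import java.util.zip.ZipFile;

    public class MultiReleaseCheck {
        public static void main(String[] args) throws Exception {
            // Hypothetical path to a jar produced by this build change.
            File jar = new File("build/distributions/elasticsearch.jar");
            // Open the jar in multi-release mode; isMultiRelease() reports
            // whether META-INF/MANIFEST.MF declares "Multi-Release: true".
            try (JarFile jarFile = new JarFile(jar, true, ZipFile.OPEN_READ, Runtime.version())) {
                System.out.println("multi-release: " + jarFile.isMultiRelease());
                // In such a jar, the Java 9 class files live under META-INF/versions/9.
            }
        }
    }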
---
 .../elasticsearch/gradle/BuildPlugin.groovy | 2 +-
 .../gradle/precommit/PrecommitTasks.groovy | 42 ++++++++-----------
 server/build.gradle | 18 +++++++-
 3 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index fcd6d6925598f..50e1cd68523d5 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -551,7 +551,7 @@ class BuildPlugin implements Plugin {
             if (project.licenseFile == null || project.noticeFile == null) {
                 throw new GradleException("Must specify license and notice file for project ${project.path}")
             }
-            jarTask.into('META-INF') {
+            jarTask.metaInf {
                 from(project.licenseFile.parent) {
                     include project.licenseFile.name
                 }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index ef6f24c5acf5a..09f0ad01578c9 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.gradle.precommit

+import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis
 import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin
 import org.gradle.api.Project
 import org.gradle.api.Task
@@ -83,17 +84,14 @@ class PrecommitTasks {
                 getClass().getResource('/forbidden/es-all-signatures.txt')]
             suppressAnnotations = ['**.SuppressForbidden']
         }
-        Task mainForbidden = project.tasks.findByName('forbiddenApisMain')
-        if (mainForbidden != null) {
-            mainForbidden.configure {
-                signaturesURLs += getClass().getResource('/forbidden/es-server-signatures.txt')
-            }
-        }
-        Task testForbidden = project.tasks.findByName('forbiddenApisTest')
-        if (testForbidden != null) {
-            testForbidden.configure {
-                signaturesURLs += getClass().getResource('/forbidden/es-test-signatures.txt')
-                signaturesURLs += getClass().getResource('/forbidden/http-signatures.txt')
+        project.tasks.withType(CheckForbiddenApis) {
+            // we do not use the += operator to add signatures, as conventionMappings of Gradle do not work when it's configured using withType:
+            if (name.endsWith('Test')) {
+                signaturesURLs = project.forbiddenApis.signaturesURLs +
+                    [ getClass().getResource('/forbidden/es-test-signatures.txt'), getClass().getResource('/forbidden/http-signatures.txt') ]
+            } else {
+                signaturesURLs = project.forbiddenApis.signaturesURLs +
+                    [ getClass().getResource('/forbidden/es-server-signatures.txt') ]
             }
         }
         Task forbiddenApis = project.tasks.findByName('forbiddenApis')
@@ -144,21 +142,15 @@ class PrecommitTasks {
             ]
             toolVersion = 7.5
         }
-        for (String taskName : ['checkstyleMain', 'checkstyleJava9', 'checkstyleTest']) {
-            Task task = project.tasks.findByName(taskName)
-            if (task != null) {
-                project.tasks['check'].dependsOn.remove(task)
-                checkstyleTask.dependsOn(task)
-                task.dependsOn(copyCheckstyleConf)
-                task.inputs.file(checkstyleSuppressions)
-                task.reports {
-                    html.enabled false
-                }
-            }
-        }
-        project.tasks.withType(Checkstyle) {
-            dependsOn(copyCheckstyleConf)
+        project.tasks.withType(Checkstyle) { task ->
+            project.tasks[JavaBasePlugin.CHECK_TASK_NAME].dependsOn.remove(task)
+            checkstyleTask.dependsOn(task)
+            task.dependsOn(copyCheckstyleConf)
+            task.inputs.file(checkstyleSuppressions)
+            task.reports {
+                html.enabled false
+            }
         }

         return checkstyleTask
diff --git a/server/build.gradle b/server/build.gradle
index ab74520106da9..ab10b7571e8a6 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -45,14 +45,30 @@ if (!isEclipse && !isIdea) {
       }
     }
   }
+
+  configurations {
+    java9Compile.extendsFrom(compile)
+  }
+
+  dependencies {
+    java9Compile sourceSets.main.output
+  }

   compileJava9Java {
     sourceCompatibility = 9
     targetCompatibility = 9
   }
+
+  /* Enable this when forbiddenapis is updated to 2.6.
+   * See: https://github.com/elastic/elasticsearch/issues/29292
+  forbiddenApisJava9 {
+    targetCompatibility = 9
+  }
+  */

   jar {
-    into('META-INF/versions/9') {
+    metaInf {
+      into 'versions/9'
       from sourceSets.java9.output
     }
     manifest.attributes('Multi-Release': 'true')

From 8e2f2be2496834593ac2e2c4579c7c97bc1c2b12 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Tue, 3 Apr 2018 16:45:53 -0400
Subject: [PATCH 53/68] Track Lucene operations in engine explicitly (#29357)

Today we rely on `IndexWriter#hasDeletions` to check if an index contains
"update" operations. However, this check considers both deletes and updates.
This commit replaces that check by tracking and checking Lucene operations
explicitly. This provides us with stronger assertions.

---
 .../index/engine/InternalEngine.java | 39 ++++++++++---
 .../index/engine/InternalEngineTests.java | 57 ++++++++++++-------
 2 files changed, 68 insertions(+), 28 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index a873898d52c21..2f6e3ab0343f4 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -136,6 +136,11 @@ public class InternalEngine extends Engine {
     private final AtomicLong maxSeqNoOfNonAppendOnlyOperations = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
     private final CounterMetric numVersionLookups = new CounterMetric();
     private final CounterMetric numIndexVersionsLookups = new CounterMetric();
+    // Lucene operations since this engine was opened - not including operations from existing segments.
+    private final CounterMetric numDocDeletes = new CounterMetric();
+    private final CounterMetric numDocAppends = new CounterMetric();
+    private final CounterMetric numDocUpdates = new CounterMetric();
+
     /**
      * How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh.
IndexingMemoryController polls this
     * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
@@ -907,11 +912,11 @@ private IndexResult indexIntoLucene(Index index, IndexingStrategy plan)
         index.parsedDoc().version().setLongValue(plan.versionForIndexing);
         try {
             if (plan.useLuceneUpdateDocument) {
-                update(index.uid(), index.docs(), indexWriter);
+                updateDocs(index.uid(), index.docs(), indexWriter);
             } else {
                 // document does not exists, we can optimize for create, but double check if assertions are running
                 assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
-                index(index.docs(), indexWriter);
+                addDocs(index.docs(), indexWriter);
             }
             return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
         } catch (Exception ex) {
@@ -968,12 +973,13 @@ long getMaxSeqNoOfNonAppendOnlyOperations() {
         return maxSeqNoOfNonAppendOnlyOperations.get();
     }

-    private static void index(final List docs, final IndexWriter indexWriter) throws IOException {
+    private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException {
         if (docs.size() > 1) {
             indexWriter.addDocuments(docs);
         } else {
             indexWriter.addDocument(docs.get(0));
         }
+        numDocAppends.inc(docs.size());
     }

     private static final class IndexingStrategy {
@@ -1054,12 +1060,13 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele
         return true;
     }

-    private static void update(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException {
+    private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException {
         if (docs.size() > 1) {
             indexWriter.updateDocuments(uid, docs);
         } else {
             indexWriter.updateDocument(uid, docs.get(0));
         }
+        numDocUpdates.inc(docs.size());
     }

     @Override
@@ -1188,6 +1195,7 @@ private DeleteResult deleteInLucene(Delete delete, DeletionStrategy plan)
                 // any exception that comes from this is a either an ACE or a fatal exception there
                 // can't be any document failures coming from this
                 indexWriter.deleteDocuments(delete.uid());
+                numDocDeletes.inc();
             }
             versionMap.putUnderLock(delete.uid().bytes(),
                 new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(),
@@ -2205,13 +2213,28 @@ boolean isSafeAccessRequired() {
         return versionMap.isSafeAccessRequired();
     }

+    /**
+     * Returns the number of documents that have been deleted since this engine was opened.
+     * This count does not include the deletions from the existing segments before opening the engine.
+     */
+    long getNumDocDeletes() {
+        return numDocDeletes.count();
+    }
+
+    /**
+     * Returns the number of documents that have been appended since this engine was opened.
+     * This count does not include the appends from the existing segments before opening the engine.
+     */
+    long getNumDocAppends() {
+        return numDocAppends.count();
+    }

     /**
-     * Returns true iff the index writer has any deletions either buffered in memory or
-     * in the index.
+     * Returns the number of documents that have been updated since this engine was opened.
+     * This count does not include the updates from the existing segments before opening the engine.
*/ - boolean indexWriterHasDeletions() { - return indexWriter.hasDeletions(); + long getNumDocUpdates() { + return numDocUpdates.count(); } @Override diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 71abfac3ebb32..9cdc68444ea16 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2939,21 +2939,21 @@ public void testDoubleDeliveryPrimary() throws IOException { Engine.Index retry = appendOnlyPrimary(doc, true, 1); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 1, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 0, 2, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3000,23 +3000,23 @@ public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); engine.delete(delete); assertEquals(1, engine.getNumVersionLookups()); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); Engine.IndexResult retryResult = engine.index(retry); assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); engine.delete(delete); - assertTrue(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 1); assertEquals(2, engine.getNumVersionLookups()); Engine.IndexResult indexResult = engine.index(operation); assertEquals(belowLckp ? 
2 : 3, engine.getNumVersionLookups()); @@ -3041,21 +3041,29 @@ public void testDoubleDeliveryReplicaAppendingOnly() throws IOException { final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(0, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); Engine.IndexResult retryResult = engine.index(retry); - assertEquals(retry.seqNo() > operation.seqNo(), engine.indexWriterHasDeletions()); + if (retry.seqNo() > operation.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 0 : 1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(retry); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); Engine.IndexResult indexResult = engine.index(operation); - assertEquals(operation.seqNo() > retry.seqNo(), engine.indexWriterHasDeletions()); + if (operation.seqNo() > retry.seqNo()) { + assertLuceneOperations(engine, 1, 1, 0); + } else { + assertLuceneOperations(engine, 1, 0, 0); + } assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3096,27 +3104,27 @@ public void testDoubleDeliveryReplica() throws IOException { Engine.Index duplicate = replicaIndexForDoc(doc, 1, 20, true); if (randomBoolean()) { Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(indexResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0); } else { Engine.IndexResult retryResult = engine.index(duplicate); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(1, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); if (randomBoolean()) { engine.refresh("test"); } Engine.IndexResult indexResult = engine.index(operation); - assertFalse(engine.indexWriterHasDeletions()); + assertLuceneOperations(engine, 1, 0, 0); assertEquals(2, engine.getNumVersionLookups()); assertNotNull(retryResult.getTranslogLocation()); assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0); @@ -3278,10 +3286,11 @@ public void testRetryConcurrently() throws InterruptedException, IOException { } if (primary) { // primaries rely on lucene dedup and may index the same document twice - assertTrue(engine.indexWriterHasDeletions()); + assertThat(engine.getNumDocUpdates(), greaterThanOrEqualTo((long) numDocs)); + assertThat(engine.getNumDocAppends() + 
engine.getNumDocUpdates(), equalTo(numDocs * 2L));
         } else {
             // replicas rely on seq# based dedup and in this setup (same seq#) should never rely on lucene
-            assertFalse(engine.indexWriterHasDeletions());
+            assertLuceneOperations(engine, numDocs, 0, 0);
         }
     }

@@ -3377,8 +3386,7 @@ public void run() {
         }
         assertEquals(0, engine.getNumVersionLookups());
         assertEquals(0, engine.getNumIndexVersionsLookups());
-        assertFalse(engine.indexWriterHasDeletions());
-
+        assertLuceneOperations(engine, numDocs, 0, 0);
     }

     public static long getNumVersionLookups(InternalEngine engine) { // for other tests to access this
@@ -4659,4 +4667,13 @@ private static void trimUnsafeCommits(EngineConfig config) throws IOException {
         store.trimUnsafeCommits(globalCheckpoint, minRetainedTranslogGen, config.getIndexSettings().getIndexVersionCreated());
     }

+    void assertLuceneOperations(InternalEngine engine, long expectedAppends, long expectedUpdates, long expectedDeletes) {
+        String message = "Lucene operations mismatched;" +
+            " appends [actual:" + engine.getNumDocAppends() + ", expected:" + expectedAppends + "]," +
+            " updates [actual:" + engine.getNumDocUpdates() + ", expected:" + expectedUpdates + "]," +
+            " deletes [actual:" + engine.getNumDocDeletes() + ", expected:" + expectedDeletes + "]";
+        assertThat(message, engine.getNumDocAppends(), equalTo(expectedAppends));
+        assertThat(message, engine.getNumDocUpdates(), equalTo(expectedUpdates));
+        assertThat(message, engine.getNumDocDeletes(), equalTo(expectedDeletes));
+    }
 }

From 8fdca6a89aee06b65605a7920c04bba73d0315f4 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Tue, 3 Apr 2018 17:27:26 -0400
Subject: [PATCH 54/68] Align cat thread pool info to thread pool config (#29195)

Today we report thread pool info using a common object. This means that we
use a shared set of terminology that is not consistent with the terminology
used to configure thread pools. This holds in particular for the minimum and
maximum number of threads in the thread pool, where we use the following
terminology:

  thread pool info | fixed | scaling
  -----------------+-------+--------
  min              | size  | core
  max              | size  | max

A previous change addressed this for the nodes info API. This commit changes
the display of thread pool info in the cat thread pool API as well, making it
dependent on the type of the thread pool, so that we can align the
terminology in the output of thread pool info with the terminology used to
configure a thread pool.

---
 docs/reference/cat/thread_pool.asciidoc | 7 +++--
 .../test/cat.thread_pool/10_basic.yml | 18 ++++++-----
 .../rest/action/cat/RestThreadPoolAction.java | 31 ++++++++++++-------
 3 files changed, 34 insertions(+), 22 deletions(-)

diff --git a/docs/reference/cat/thread_pool.asciidoc b/docs/reference/cat/thread_pool.asciidoc
index 163a729e51cc3..bfc5ca415c3ba 100644
--- a/docs/reference/cat/thread_pool.asciidoc
+++ b/docs/reference/cat/thread_pool.asciidoc
@@ -113,14 +113,15 @@ in the table below.
|Field Name |Alias |Description |`type` |`t` |The current (*) type of thread pool (`fixed` or `scaling`) |`active` |`a` |The number of active threads in the current thread pool -|`size` |`s` |The number of threads in the current thread pool +|`pool_size` |`psz` |The number of threads in the current thread pool |`queue` |`q` |The number of tasks in the queue for the current thread pool |`queue_size` |`qs` |The maximum number of tasks permitted in the queue for the current thread pool |`rejected` |`r` |The number of tasks rejected by the thread pool executor |`largest` |`l` |The highest number of active threads in the current thread pool |`completed` |`c` |The number of tasks completed by the thread pool executor -|`min` |`mi` |The configured minimum number of active threads allowed in the current thread pool -|`max` |`ma` |The configured maximum number of active threads allowed in the current thread pool +|`core` |`cr` |The configured core number of active threads allowed in the current thread pool +|`max` |`mx` |The configured maximum number of active threads allowed in the current thread pool +|`size` |`sz` |The configured fixed number of active threads allowed in the current thread pool |`keep_alive` |`k` |The configured keep alive time for threads |======================================================================= diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml index 9cd970341412a..bb16ae391c46d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.thread_pool/10_basic.yml @@ -1,6 +1,10 @@ --- "Test cat thread_pool output": + - skip: + version: " - 6.99.99" + reason: this API was changed in a backwards-incompatible fashion in 7.0.0 so we need to skip in a mixed cluster + - do: cat.thread_pool: {} @@ -46,25 +50,25 @@ - do: cat.thread_pool: thread_pool_patterns: bulk - h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive + h: id,name,type,active,pool_size,queue,queue_size,rejected,largest,completed,core,max,size,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ pool_size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ core \s+ max \s+ size \s+ keep_alive \n + (\S+ \s+ bulk \s+ fixed \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: thread_pool_patterns: fetch* - h: id,name,type,active,size,queue,queue_size,rejected,largest,completed,min,max,keep_alive + h: id,name,type,active,pool_size,queue,queue_size,rejected,largest,completed,core,max,size,keep_alive v: true - match: $body: | - /^ id \s+ name \s+ type \s+ active \s+ size \s+ queue \s+ queue_size \s+ rejected \s+ largest \s+ completed \s+ min \s+ max \s+ keep_alive \n - (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n - \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \S* \n)+ $/ + /^ id \s+ name \s+ type \s+ active \s+ pool_size \s+ queue \s+ 
queue_size \s+ rejected \s+ largest \s+ completed \s+ core \s+ max \s+ size \s+ keep_alive \n + (\S+ \s+ fetch_shard_started \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n + \S+ \s+ fetch_shard_store \s+ scaling \s+ \d+ \s+ \d+ \s+ \d+ \s+ (-1|\d+) \s+ \d+ \s+ \d+ \s+ \d+ \s+ \d* \s+ \d* \s+ \d* \s+ \S* \n)+ $/ - do: cat.thread_pool: diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java index 0e0f4fe8c155d..3df270c8f6c80 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestThreadPoolAction.java @@ -124,14 +124,15 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("name", "default:true;alias:n;desc:thread pool name"); table.addCell("type", "alias:t;default:false;desc:thread pool type"); table.addCell("active", "alias:a;default:true;text-align:right;desc:number of active threads"); - table.addCell("size", "alias:s;default:false;text-align:right;desc:number of threads"); + table.addCell("pool_size", "alias:psz;default:false;text-align:right;desc:number of threads"); table.addCell("queue", "alias:q;default:true;text-align:right;desc:number of tasks currently in queue"); table.addCell("queue_size", "alias:qs;default:false;text-align:right;desc:maximum number of tasks permitted in queue"); table.addCell("rejected", "alias:r;default:true;text-align:right;desc:number of rejected tasks"); table.addCell("largest", "alias:l;default:false;text-align:right;desc:highest number of seen active threads"); table.addCell("completed", "alias:c;default:false;text-align:right;desc:number of completed tasks"); - table.addCell("min", "alias:mi;default:false;text-align:right;desc:minimum number of threads"); - table.addCell("max", "alias:ma;default:false;text-align:right;desc:maximum number of threads"); + table.addCell("core", "alias:cr;default:false;text-align:right;desc:core number of threads in a scaling thread pool"); + table.addCell("max", "alias:mx;default:false;text-align:right;desc:maximum number of threads in a scaling thread pool"); + table.addCell("size", "alias:sz;default:false;text-align:right;desc:number of threads in a fixed thread pool"); table.addCell("keep_alive", "alias:ka;default:false;text-align:right;desc:thread keep alive time"); table.endHeaders(); return table; @@ -201,8 +202,9 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR Long maxQueueSize = null; String keepAlive = null; - Integer minThreads = null; - Integer maxThreads = null; + Integer core = null; + Integer max = null; + Integer size = null; if (poolInfo != null) { if (poolInfo.getQueueSize() != null) { @@ -211,11 +213,15 @@ private Table buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR if (poolInfo.getKeepAlive() != null) { keepAlive = poolInfo.getKeepAlive().toString(); } - if (poolInfo.getMin() >= 0) { - minThreads = poolInfo.getMin(); - } - if (poolInfo.getMax() >= 0) { - maxThreads = poolInfo.getMax(); + + if (poolInfo.getThreadPoolType() == ThreadPool.ThreadPoolType.SCALING) { + assert poolInfo.getMin() >= 0; + core = poolInfo.getMin(); + assert poolInfo.getMax() > 0; + max = poolInfo.getMax(); + } else { + assert poolInfo.getMin() == poolInfo.getMax() && poolInfo.getMax() > 0; + size = poolInfo.getMax(); } } @@ -228,8 +234,9 @@ private Table 
buildTable(RestRequest req, ClusterStateResponse state, NodesInfoR table.addCell(poolStats == null ? null : poolStats.getRejected()); table.addCell(poolStats == null ? null : poolStats.getLargest()); table.addCell(poolStats == null ? null : poolStats.getCompleted()); - table.addCell(minThreads); - table.addCell(maxThreads); + table.addCell(core); + table.addCell(max); + table.addCell(size); table.addCell(keepAlive); table.endRow(); From 5cdd831a31ed39e4a4971b7cf6cf1aaefba0940f Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Apr 2018 21:23:01 -0400 Subject: [PATCH 55/68] Remove silent batch mode from install plugin (#29359) Today we have a silent batch mode in the install plugin command when standard input is closed or there is no tty. It appears that historically this was useful when running tests where we want to accept plugin permissions without having to acknowledge them. Now that we have an explicit batch mode flag, this use-case is removed. The motivation for removing this now is that there is another place where silent batch mode arises and that is when a user attempts to install a plugin inside a Docker container without keeping standard input open and attaching a tty. In this case, the install plugin command will treat the situation as a silent batch mode and therefore the user will never have the chance to acknowledge the additional permissions required by a plugin. This commit removes this silent batch mode in favor of using the --batch flag when running tests and requiring the user to take explicit action to acknowledge the additional permissions (either by leaving standard input open and attaching a tty, or by passing the --batch flags themselves). Note that with this change the user will now see a null pointer exception when they try to install a plugin in a Docker container without keeping standard input open and attaching a tty. This will be addressed in an immediate follow-up, but because the implications of that change are larger, they should be handled separately from this one. --- .../elasticsearch/gradle/test/ClusterFormationTasks.groovy | 2 +- .../java/org/elasticsearch/plugins/InstallPluginCommand.java | 2 +- .../packaging/tests/module_and_plugin_test_cases.bash | 4 ++-- qa/vagrant/src/test/resources/packaging/utils/plugins.bash | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 4d6b54fa3bbee..8e97ee352ead2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -494,7 +494,7 @@ class ClusterFormationTasks { * the short name requiring the path to already exist. 
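     * For reference, the installation this task performs is equivalent to
     * invoking the CLI by hand (an illustrative invocation, not part of this
     * change):
     *
     *   bin/elasticsearch-plugin install --batch file:///path/to/plugin.zip
     *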
*/ final Object esPluginUtil = "${-> node.binPath().resolve('elasticsearch-plugin').toString()}" - final Object[] args = [esPluginUtil, 'install', file] + final Object[] args = [esPluginUtil, 'install', '--batch', file] return configureExecTask(name, project, setup, node, args) } diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java index 84f3764880243..5a14d041c763b 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/InstallPluginCommand.java @@ -208,7 +208,7 @@ protected void printAdditionalHelp(Terminal terminal) { @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { String pluginId = arguments.value(options); - boolean isBatch = options.has(batchOption) || System.console() == null; + final boolean isBatch = options.has(batchOption); execute(terminal, pluginId, isBatch, env); } diff --git a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash index 190f70e9bad0b..a62d690897e37 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash +++ b/qa/vagrant/src/test/resources/packaging/tests/module_and_plugin_test_cases.bash @@ -416,7 +416,7 @@ fi @test "[$GROUP] install a sample plugin with different logging modes and check output" { local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output # exclude progress line local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -eq "2" ] || { @@ -427,7 +427,7 @@ fi remove_plugin_example local relativePath=${1:-$(readlink -m custom-settings-*.zip)} - sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output + sudo -E -u $ESPLUGIN_COMMAND_USER ES_JAVA_OPTS="-Des.logger.level=DEBUG" "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$relativePath" > /tmp/plugin-cli-output local loglines=$(cat /tmp/plugin-cli-output | grep -v "^[[:cntrl:]]" | wc -l) [ "$loglines" -gt "2" ] || { echo "Expected more than 2 lines excluding progress bar but the output had $loglines lines and was:" diff --git a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash index 403d89b30ecad..f9110b3066295 100644 --- a/qa/vagrant/src/test/resources/packaging/utils/plugins.bash +++ b/qa/vagrant/src/test/resources/packaging/utils/plugins.bash @@ -47,9 +47,9 @@ install_plugin() { fi if [ -z "$umask" ]; then - sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install -batch "file://$path" + sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install --batch "file://$path" else - sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" install -batch \"file://$path\"" + sudo -E -u $ESPLUGIN_COMMAND_USER bash -c "umask $umask && \"$ESHOME/bin/elasticsearch-plugin\" 
install --batch \"file://$path\"" fi #check we did not accidentially create a log file as root as /usr/share/elasticsearch From 4b1ed20a67375da62927e811fe44f491045ecd7e Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 3 Apr 2018 23:18:51 -0400 Subject: [PATCH 56/68] Add awaits fix for HasChildQueryBuilderTests These tests are failing since 569d0c0e897478be4f05a1daba8d217a526e0eeb. This commit adds an awaits fix for them until they can be addressed. --- .../org/elasticsearch/join/query/HasChildQueryBuilderTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 2d7215c239821..bb0881daf6d12 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -72,6 +73,7 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29362") public class HasChildQueryBuilderTests extends AbstractQueryTestCase { private static final String TYPE = "_doc"; From 1891d4f83de8e91cc33ded6966f0efce57c43f5d Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 4 Apr 2018 10:26:50 +0100 Subject: [PATCH 57/68] Check presence of multi-types before validating new mapping (#29316) Before doing any kind of validation on a new mapping, we should first do the multi-type validation in order to provide better error messages. 
For #29313, this means that the exception message will be Rejecting mapping update to [range_index_new] as the final mapping would have more than 1 type: [_doc, mytype] instead of [expected_attendees] is defined as an object in mapping [mytype] but this name is already used for a field in other types --- .../index/mapper/MapperService.java | 19 +++++----- .../admin/indices/create/CreateIndexIT.java | 19 ---------- .../index/mapper/MapperServiceTests.java | 20 +++++++++- .../index/mapper/UpdateMappingTests.java | 37 +++++++------------ 4 files changed, 41 insertions(+), 54 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 4f3b045bfc295..e13c23754ab38 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -385,6 +385,16 @@ private synchronized Map internalMerge(@Nullable Documen results.put(DEFAULT_MAPPING, defaultMapper); } + if (indexSettings.isSingleType()) { + Set actualTypes = new HashSet<>(mappers.keySet()); + documentMappers.forEach(mapper -> actualTypes.add(mapper.type())); + actualTypes.remove(DEFAULT_MAPPING); + if (actualTypes.size() > 1) { + throw new IllegalArgumentException( + "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); + } + } + for (DocumentMapper mapper : documentMappers) { // check naming validateTypeName(mapper.type()); @@ -478,15 +488,6 @@ private synchronized Map internalMerge(@Nullable Documen } } - if (indexSettings.isSingleType()) { - Set actualTypes = new HashSet<>(mappers.keySet()); - actualTypes.remove(DEFAULT_MAPPING); - if (actualTypes.size() > 1) { - throw new IllegalArgumentException( - "Rejecting mapping update to [" + index().getName() + "] as the final mapping would have more than 1 type: " + actualTypes); - } - } - // make structures immutable mappers = Collections.unmodifiableMap(mappers); results = Collections.unmodifiableMap(results); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index df63613b5b97d..c27d9ef65b231 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -264,25 +264,6 @@ public void onFailure(Exception e) { logger.info("total: {}", expected.getHits().getTotalHits()); } - /** - * Asserts that the root cause of mapping conflicts is readable. 
- */ - public void testMappingConflictRootCause() throws Exception { - CreateIndexRequestBuilder b = prepareCreate("test"); - b.addMapping("type1", jsonBuilder().startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .field("analyzer", "standard") - .field("search_analyzer", "whitespace") - .endObject().endObject().endObject()); - b.addMapping("type2", jsonBuilder().humanReadable(true).startObject().startObject("properties") - .startObject("text") - .field("type", "text") - .endObject().endObject().endObject()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> b.get()); - assertThat(e.getMessage(), containsString("Mapper for [text] conflicts with existing mapping:")); - } - public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java index e130b128ac81c..732fa9bad184c 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MapperServiceTests.java @@ -164,7 +164,7 @@ public void testMappingDepthExceedsLimit() throws Throwable { indexService2.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> indexService1.mapperService().merge("type2", objectMapping, MergeReason.MAPPING_UPDATE)); + () -> indexService1.mapperService().merge("type", objectMapping, MergeReason.MAPPING_UPDATE)); assertThat(e.getMessage(), containsString("Limit of mapping depth [1] in index [test1] has been exceeded")); } @@ -255,7 +255,6 @@ public void testPartitionedConstraints() { // partitioned index cannot have parent/child relationships IllegalArgumentException parentException = expectThrows(IllegalArgumentException.class, () -> { client().admin().indices().prepareCreate("test-index") - .addMapping("parent", "{\"parent\":{\"_routing\":{\"required\":true}}}", XContentType.JSON) .addMapping("child", "{\"child\": {\"_routing\":{\"required\":true}, \"_parent\": {\"type\": \"parent\"}}}", XContentType.JSON) .setSettings(Settings.builder() @@ -307,6 +306,23 @@ public void testForbidMultipleTypes() throws IOException { assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); } + /** + * This test checks that the multi-type validation is done before we do any other kind of validation on the mapping that's added, + * see https://github.com/elastic/elasticsearch/issues/29313 + */ + public void testForbidMultipleTypesWithConflictingMappings() throws IOException { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field1").field("type", "integer_range").endObject().endObject().endObject().endObject()); + MapperService mapperService = createIndex("test").mapperService(); + mapperService.merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE); + + String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type2") + .startObject("properties").startObject("field1").field("type", 
"integer").endObject().endObject().endObject().endObject()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> mapperService.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), Matchers.startsWith("Rejecting mapping update to [test] as the final mapping would have more than 1 type: ")); + } + public void testDefaultMappingIsRejectedOn7() throws IOException { String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("_default_").endObject().endObject()); MapperService mapperService = createIndex("test").mapperService(); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java index fd9c2e2b375e2..311257b837d1b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UpdateMappingTests.java @@ -117,34 +117,25 @@ public void testConflictSameType() throws Exception { } public void testConflictNewType() throws Exception { - XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") + XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("foo").field("type", "long").endObject() .endObject().endObject().endObject(); - MapperService mapperService = createIndex("test", Settings.builder().build(), "type1", mapping).mapperService(); + MapperService mapperService = createIndex("test", Settings.builder().build(), "type", mapping).mapperService(); - XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type2") + XContentBuilder update = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("foo").field("type", "double").endObject() .endObject().endObject().endObject(); try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); - fail(); - } catch (IllegalArgumentException e) { - // expected - assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); - } - - try { - mapperService.merge("type2", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); + mapperService.merge("type", new CompressedXContent(Strings.toString(update)), MapperService.MergeReason.MAPPING_UPDATE); fail(); } catch (IllegalArgumentException e) { // expected assertTrue(e.getMessage(), e.getMessage().contains("mapper [foo] cannot be changed from type [long] to [double]")); } - assertThat(((FieldMapper) mapperService.documentMapper("type1").mapping().root().getMapper("foo")).fieldType().typeName(), + assertThat(((FieldMapper) mapperService.documentMapper("type").mapping().root().getMapper("foo")).fieldType().typeName(), equalTo("long")); - assertNull(mapperService.documentMapper("type2")); } // same as the testConflictNewType except that the mapping update is on an existing type @@ -208,7 +199,7 @@ public void testReuseMetaField() throws IOException { public void testRejectFieldDefinedTwice() throws IOException { String mapping1 = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type1") + .startObject("type") .startObject("properties") .startObject("foo") .field("type", "object") @@ -216,7 +207,7 @@ public void testRejectFieldDefinedTwice() throws 
IOException { .endObject() .endObject().endObject()); String mapping2 = Strings.toString(XContentFactory.jsonBuilder().startObject() - .startObject("type2") + .startObject("type") .startObject("properties") .startObject("foo") .field("type", "long") @@ -225,17 +216,15 @@ public void testRejectFieldDefinedTwice() throws IOException { .endObject().endObject()); MapperService mapperService1 = createIndex("test1").mapperService(); - mapperService1.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); + mapperService1.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> mapperService1.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), equalTo("[foo] is defined as a field in mapping [type2" - + "] but this name is already used for an object in other types")); + () -> mapperService1.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), equalTo("Can't merge a non object mapping [foo] with an object mapping [foo]")); MapperService mapperService2 = createIndex("test2").mapperService(); - mapperService2.merge("type2", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); + mapperService2.merge("type", new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE); e = expectThrows(IllegalArgumentException.class, - () -> mapperService2.merge("type1", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); - assertThat(e.getMessage(), equalTo("[foo] is defined as an object in mapping [type1" - + "] but this name is already used for a field in other types")); + () -> mapperService2.merge("type", new CompressedXContent(mapping1), MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), equalTo("mapper [foo] of different type, current_type [long], merged_type [ObjectMapper]")); } } From a19fd5636b8062abd7daf543539ae34aa0281ef1 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 4 Apr 2018 05:40:13 -0400 Subject: [PATCH 58/68] Add awaits fix for a query analyzer test The test QueryAnalyzerTests#testExactMatch_booleanQuery is failing since 8cdd950056b722e7086f48fb84cf6c29ccf31bd3. This commit adds an awaits fix for it until it can be addressed. 
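For reference, muting a test with a tracking issue looks like the following
(a minimal sketch mirroring the change below):

    // The annotation skips the test and records the issue that tracks the failure.
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29363")
    public void testExactMatch_booleanQuery() {
        // body unchanged; the test is skipped until the issue is resolved
    }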
--- .../java/org/elasticsearch/percolator/QueryAnalyzerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 7bcdcd2e1f695..9f0714b48934e 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -384,6 +384,7 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[1].bytes())); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29363") public void testExactMatch_booleanQuery() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); From c1ae7e834c2ced9cd68ddad501a528acc3b82c46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 4 Apr 2018 10:47:21 +0200 Subject: [PATCH 59/68] Make TransportRankEvalAction members final --- .../index/rankeval/TransportRankEvalAction.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index d24a779fd61ce..50ab9bcf27271 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -67,9 +67,9 @@ * averaged precision at n. */ public class TransportRankEvalAction extends HandledTransportAction { - private Client client; - private ScriptService scriptService; - private NamedXContentRegistry namedXContentRegistry; + private final Client client; + private final ScriptService scriptService; + private final NamedXContentRegistry namedXContentRegistry; @Inject public TransportRankEvalAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters, From 38a651e5f195036d60282369ca1e4da5830c1d1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?St=C3=A9phane=20Campinas?= Date: Wed, 4 Apr 2018 12:11:29 +0200 Subject: [PATCH 60/68] [Docs] Correct javadoc of GetIndexRequest (#29364) --- .../elasticsearch/action/admin/indices/get/GetIndexRequest.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java index 2a70aa836454e..7ca9a9f11956d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexRequest.java @@ -28,7 +28,7 @@ import java.io.IOException; /** - * A request to delete an index. Best created with {@link org.elasticsearch.client.Requests#deleteIndexRequest(String)}. + * A request to retrieve information about an index. */ public class GetIndexRequest extends ClusterInfoRequest { public enum Feature { From c052e989cfb16c89256dac847d5a31971570a116 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 4 Apr 2018 12:42:52 +0200 Subject: [PATCH 61/68] Fix HasChildQueryBuilderTests to not use the `classic` similarity. 
Closes #29362 --- .../elasticsearch/join/query/HasChildQueryBuilderTests.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index bb0881daf6d12..f764364380fcf 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; @@ -73,7 +72,6 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.notNullValue; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29362") public class HasChildQueryBuilderTests extends AbstractQueryTestCase { private static final String TYPE = "_doc"; @@ -99,7 +97,7 @@ protected Settings indexSettings() { @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - similarity = randomFrom("classic", "BM25"); + similarity = randomFrom("boolean", "BM25"); XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties") .startObject("join_field") .field("type", "join") From c21057b3a2503cb480ad5e631ecd39d46cbcef42 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 4 Apr 2018 12:47:37 +0200 Subject: [PATCH 62/68] Fix QueryAnalyzerTests. 
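Judging from the diff, the test drew msm from [1, 2] but asserted that the analyzed minimumShouldMatch was exactly 2, which could not hold when msm was 1. A rough before/after sketch (simplified from the test below):

    // before (flaky): msm could be 1, yet the assertion demanded a constant 2
    builder.setMinimumNumberShouldMatch(randomIntBetween(1, 2));
    assertThat(result.minimumShouldMatch, equalTo(2));

    // after: msm is at least 2, a third SHOULD clause is added, and the
    // assertion tracks the randomized value instead of a constant
    int msm = randomIntBetween(2, 3);
    builder.setMinimumNumberShouldMatch(msm);
    assertThat(result.minimumShouldMatch, equalTo(msm));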
Closes #29363 --- .../org/elasticsearch/percolator/QueryAnalyzerTests.java | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java index 9f0714b48934e..d9977c388b248 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/QueryAnalyzerTests.java @@ -384,7 +384,6 @@ public void testExtractQueryMetadata_booleanQueryWithMustNot() { assertThat(terms.get(1).bytes(), equalTo(phraseQuery.getTerms()[1].bytes())); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29363") public void testExactMatch_booleanQuery() { BooleanQuery.Builder builder = new BooleanQuery.Builder(); TermQuery termQuery1 = new TermQuery(new Term("_field", "_term1")); @@ -419,12 +418,15 @@ public void testExactMatch_booleanQuery() { assertThat(result.minimumShouldMatch, equalTo(1)); builder = new BooleanQuery.Builder(); - builder.setMinimumNumberShouldMatch(randomIntBetween(1, 2)); + int msm = randomIntBetween(2, 3); + builder.setMinimumNumberShouldMatch(msm); + TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); builder.add(termQuery1, BooleanClause.Occur.SHOULD); builder.add(termQuery2, BooleanClause.Occur.SHOULD); + builder.add(termQuery3, BooleanClause.Occur.SHOULD); result = analyze(builder.build(), Version.CURRENT); assertThat("Minimum match has not impact on whether the result is verified", result.verified, is(true)); - assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(2)); + assertThat("msm is at least two so result.minimumShouldMatch should 2 too", result.minimumShouldMatch, equalTo(msm)); builder = new BooleanQuery.Builder(); builder.add(termQuery1, randomBoolean() ? BooleanClause.Occur.MUST : BooleanClause.Occur.FILTER); @@ -453,7 +455,6 @@ public void testExactMatch_booleanQuery() { assertThat("Prohibited clause, so candidate matches are not verified", result.verified, is(false)); assertThat(result.minimumShouldMatch, equalTo(1)); - TermQuery termQuery3 = new TermQuery(new Term("_field", "_term3")); builder = new BooleanQuery.Builder() .add(new BooleanQuery.Builder() .add(termQuery1, Occur.FILTER) From c95e7539e7c090eba45b8a79c68570907d0b9e4a Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Wed, 4 Apr 2018 07:22:13 -0400 Subject: [PATCH 63/68] Enhance error for out of bounds byte size settings (#29338) Today when you input a byte size setting that is out of bounds for the setting, you get an error message that indicates the maximum value of the setting. The problem is that because we use ByteSizeValue#toString, we end up with a representation of the value that does not really tell you what the bound is. For example, if the bound is 2^31 - 1 bytes, the output would be 1.9gb, which does not really tell you what the limit is, as there are many byte size values that we format to the same 1.9gb with ByteSizeValue#toString. We have a method ByteSizeValue#getStringRep that uses the input units of the value as the output units for the string representation, so we end up with no loss if we use this to report the bound. This commit does this.
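A minimal sketch of the difference (illustration only, not part of the change; the outputs follow from the example above and the updated test expectations below):

    ByteSizeValue max = new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES);
    max.toString();      // "1.9gb"        -- lossy: many distinct values format to the same string
    max.getStringRep();  // "2147483647b"  -- exact: reuses the units the value was created with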
--- .../azure/AzureRepositorySettingsTests.java | 6 +- ...eCloudStorageBlobStoreRepositoryTests.java | 6 +- .../repositories/s3/S3RepositoryTests.java | 4 +- .../common/settings/Setting.java | 17 ++++- .../common/settings/SettingTests.java | 75 +++++++++++++------ 5 files changed, 74 insertions(+), 34 deletions(-) diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 75025332889a7..01b26bad343d5 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -112,17 +112,17 @@ public void testChunkSize() throws StorageException, IOException, URISyntaxExcep // zero bytes is not allowed IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "0").build())); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "-1").build())); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = expectThrows(IllegalArgumentException.class, () -> azureRepository(Settings.builder().put("chunk_size", "65mb").build())); - assertEquals("Failed to parse value [65mb] for setting [chunk_size] must be <= 64mb", e.getMessage()); + assertEquals("failed to parse value [65mb] for setting [chunk_size], must be <= [64mb]", e.getMessage()); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index ec166ff867faa..1a173b440659d 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -110,7 +110,7 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "0").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [0] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [0] for setting [chunk_size], must be >= [1b]", e.getMessage()); // negative bytes not allowed e = expectThrows(IllegalArgumentException.class, () -> { @@ -118,7 +118,7 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "-1").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [-1] for setting [chunk_size] must be >= 1b", e.getMessage()); + assertEquals("failed to parse value [-1] for setting [chunk_size], must be >= [1b]", e.getMessage()); // greater than max chunk size not allowed e = 
expectThrows(IllegalArgumentException.class, () -> { @@ -126,6 +126,6 @@ public void testChunkSize() { Settings.builder().put("chunk_size", "101mb").build()); GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repoMetaData); }); - assertEquals("Failed to parse value [101mb] for setting [chunk_size] must be <= 100mb", e.getMessage()); + assertEquals("failed to parse value [101mb] for setting [chunk_size], must be <= [100mb]", e.getMessage()); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 93508f11c097a..7da65c27d8194 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -70,10 +70,10 @@ public void testInvalidChunkBufferSizeSettings() throws IOException { assertValidBuffer(5, 5); // buffer < 5mb should fail assertInvalidBuffer(4, 10, IllegalArgumentException.class, - "Failed to parse value [4mb] for setting [buffer_size] must be >= 5mb"); + "failed to parse value [4mb] for setting [buffer_size], must be >= [5mb]"); // chunk > 5tb should fail assertInvalidBuffer(5, 6000000, IllegalArgumentException.class, - "Failed to parse value [6000000mb] for setting [chunk_size] must be <= 5tb"); + "failed to parse value [6000000mb] for setting [chunk_size], must be <= [5tb]"); } private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/settings/Setting.java b/server/src/main/java/org/elasticsearch/common/settings/Setting.java index 9575862194db6..9d4ee53aa1aa9 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -47,6 +47,7 @@ import java.util.IdentityHashMap; import java.util.Iterator; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -1070,10 +1071,22 @@ public static Setting byteSizeSetting(String key, Function= " + minValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be >= [%s]", + s, + key, + minValue.getStringRep()); + throw new IllegalArgumentException(message); } if (value.getBytes() > maxValue.getBytes()) { - throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be <= " + maxValue); + final String message = String.format( + Locale.ROOT, + "failed to parse value [%s] for setting [%s], must be <= [%s]", + s, + key, + maxValue.getStringRep()); + throw new IllegalArgumentException(message); } return value; } diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java index 180f11730dfed..187c0e21b4d42 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -52,35 
+53,61 @@ public void testGet() { assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build())); } - public void testByteSize() { - Setting byteSizeValueSetting = - Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); + public void testByteSizeSetting() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), Property.Dynamic, Property.NodeScope); assertFalse(byteSizeValueSetting.isGroupSetting()); - ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 1024); - - byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); - byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); - assertEquals(byteSizeValue.getBytes(), 2048); - - + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(1024L)); + } + + public void testByteSizeSettingMinValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = 20_000_000 - randomIntBetween(1, 1024); + final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be >= [20000000b]"; + assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingMaxValue() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting( + "a.byte.size", + new ByteSizeValue(100, ByteSizeUnit.MB), + new ByteSizeValue(16, ByteSizeUnit.MB), + new ByteSizeValue(Integer.MAX_VALUE, ByteSizeUnit.BYTES)); + final long value = (1L << 31) - 1 + randomIntBetween(1, 1024); + final Settings settings = Settings.builder().put("a.byte.size", value + "b").build(); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> byteSizeValueSetting.get(settings)); + final String expectedMessage = "failed to parse value [" + value + "b] for setting [a.byte.size], must be <= [2147483647b]"; + assertThat(e, hasToString(containsString(expectedMessage))); + } + + public void testByteSizeSettingValidation() { + final Setting byteSizeValueSetting = + Setting.byteSizeSetting("a.byte.size", s -> "2048b", Property.Dynamic, Property.NodeScope); + final ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY); + assertThat(byteSizeValue.getBytes(), equalTo(2048L)); AtomicReference value = new AtomicReference<>(null); ClusterSettings.SettingUpdater settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger); - try { - settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY); - fail("no unit"); - } catch (IllegalArgumentException ex) { - assertThat(ex, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); - assertNotNull(ex.getCause()); - assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); - final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - final String expected = - "failed to parse setting [a.byte.size] with value [12] as a size in bytes: 
unit is missing or unrecognized"; - assertThat(cause, hasToString(containsString(expected))); - } + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY)); + assertThat(e, hasToString(containsString("illegal value can't update [a.byte.size] from [2048b] to [12]"))); + assertNotNull(e.getCause()); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); + final IllegalArgumentException cause = (IllegalArgumentException) e.getCause(); + final String expected = "failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized"; + assertThat(cause, hasToString(containsString(expected))); assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY)); - assertEquals(new ByteSizeValue(12), value.get()); + assertThat(value.get(), equalTo(new ByteSizeValue(12))); } public void testMemorySize() { From 25d411eb32fd025a3102accdc495510865ea9181 Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Wed, 4 Apr 2018 14:50:23 +0200 Subject: [PATCH 64/68] Remove undocumented action.master.force_local setting (#29351) `action.master.force_local` was only ever used internally and never documented. It was one of those settings that were automatically added to a tribe node, to make sure that cluster state read operations would work locally rather than failing when trying to forward the request to the master (as the tribe node never had a master). Given that we recently removed the tribe node, we can also remove this setting. --- .../support/master/TransportMasterNodeReadAction.java | 11 +---------- .../common/settings/ClusterSettings.java | 2 -- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java index 4f36929df2755..d427da76a2fa2 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadAction.java @@ -24,8 +24,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -39,11 +37,6 @@ public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends TransportMasterNodeAction { - public static final Setting FORCE_LOCAL_SETTING = - Setting.boolSetting("action.master.force_local", false, Property.NodeScope); - - private final boolean forceLocal; - protected TransportMasterNodeReadAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { @@ -61,7 +54,6 @@ protected TransportMasterNodeReadAction(Settings settings, String actionName, bo IndexNameExpressionResolver indexNameExpressionResolver, Supplier request) { super(settings, actionName, checkSizeLimit, transportService, 
clusterService, threadPool, actionFilters, indexNameExpressionResolver,request); - this.forceLocal = FORCE_LOCAL_SETTING.get(settings); } protected TransportMasterNodeReadAction(Settings settings, String actionName, boolean checkSizeLimit, TransportService transportService, @@ -69,11 +61,10 @@ protected TransportMasterNodeReadAction(Settings settings, String actionName, bo Writeable.Reader request, IndexNameExpressionResolver indexNameExpressionResolver) { super(settings, actionName, checkSizeLimit, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); - this.forceLocal = FORCE_LOCAL_SETTING.get(settings); } @Override protected final boolean localExecute(Request request) { - return forceLocal || request.local(); + return request.local(); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index bcfed3388e9f2..ced99fc806527 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; import org.elasticsearch.bootstrap.BootstrapSettings; import org.elasticsearch.client.Client; import org.elasticsearch.client.transport.TransportClient; @@ -373,7 +372,6 @@ public void apply(Settings value, Settings current, Settings previous) { Node.NODE_INGEST_SETTING, Node.NODE_ATTRIBUTES, Node.NODE_LOCAL_STORAGE_SETTING, - TransportMasterNodeReadAction.FORCE_LOCAL_SETTING, AutoCreateIndex.AUTO_CREATE_INDEX_SETTING, BaseRestHandler.MULTI_ALLOW_EXPLICIT_INDEX, ClusterName.CLUSTER_NAME_SETTING, From 08abbdf12942b6a7be549c3a093c96f601a01ed3 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 4 Apr 2018 15:55:26 +0200 Subject: [PATCH 65/68] Use fixture to test repository-url module (#29355) This commit adds a YAML integration test for the repository-url module that uses a fixture to test URL based repositories on both http:// and file:// prefixes. --- modules/repository-url/build.gradle | 26 +- .../RepositoryURLClientYamlTestSuiteIT.java | 85 ++++++- .../repositories/url/URLFixture.java | 162 ++++++++++++ .../test/repository_url/10_basic.yml | 240 ++++++++++++++++-- .../test/repository_url/20_repository.yml | 15 ++ 5 files changed, 510 insertions(+), 18 deletions(-) create mode 100644 modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 7008111ca9c54..79fe5e7aaefa7 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -16,12 +16,36 @@ * specific language governing permissions and limitations * under the License. 
*/ +import org.elasticsearch.gradle.test.AntFixture esplugin { description 'Module for URL repository' classname 'org.elasticsearch.plugin.repository.url.URLRepositoryPlugin' } +forbiddenApisTest { + // we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' +} + +// This directory is shared between two URL repositories and one FS repository in YAML integration tests +File repositoryDir = new File(project.buildDir, "shared-repository") + +/** A task to start the URLFixture which exposes the repositoryDir over HTTP **/ +task urlFixture(type: AntFixture) { + doFirst { + repositoryDir.mkdirs() + } + env 'CLASSPATH', "${ -> project.sourceSets.test.runtimeClasspath.asPath }" + executable = new File(project.runtimeJavaHome, 'bin/java') + args 'org.elasticsearch.repositories.url.URLFixture', baseDir, "${repositoryDir.absolutePath}" +} + integTestCluster { - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + dependsOn urlFixture + // repositoryDir is used by a FS repository to create snapshots + setting 'path.repo', "${repositoryDir.absolutePath}" + // repositoryDir is used by two URL repositories to restore snapshots + setting 'repositories.url.allowed_urls', "http://snapshot.test*,http://${ -> urlFixture.addressAndPort }" } diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 238b14ce013ad..79b3f8c5df4bf 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -21,9 +21,31 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; - +import org.apache.http.HttpEntity; +import org.apache.http.entity.ContentType; +import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.URL; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class RepositoryURLClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { @@ -35,5 +57,66 @@ public RepositoryURLClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate public static Iterable parameters() throws Exception { return ESClientYamlSuiteTestCase.createParameters(); } + + /** + * This method registers 3 snapshot/restore repositories: + * - repository-fs: this FS repository is used to create snapshots. 
+ * - repository-url: this URL repository is used to restore snapshots created using the previous repository. It uses + * the URLFixture to restore snapshots over HTTP. + * - repository-file: similar as the previous repository but using a file:// prefix instead of http://. + **/ + @Before + public void registerRepositories() throws IOException { + Response clusterSettingsResponse = client().performRequest("GET", "/_cluster/settings?include_defaults=true" + + "&filter_path=defaults.path.repo,defaults.repositories.url.allowed_urls"); + Map clusterSettings = entityAsMap(clusterSettingsResponse); + + @SuppressWarnings("unchecked") + List pathRepo = (List) XContentMapValues.extractValue("defaults.path.repo", clusterSettings); + assertThat(pathRepo, hasSize(1)); + + // Create a FS repository using the path.repo location + Response createFsRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-fs", emptyMap(), + buildRepositorySettings(FsRepository.TYPE, Settings.builder().put("location", pathRepo.get(0)).build())); + assertThat(createFsRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + // Create a URL repository using the file://{path.repo} URL + Response createFileRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-file", emptyMap(), + buildRepositorySettings(URLRepository.TYPE, Settings.builder().put("url", "file://" + pathRepo.get(0)).build())); + assertThat(createFileRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + // Create a URL repository using the http://{fixture} URL + @SuppressWarnings("unchecked") + List allowedUrls = (List) XContentMapValues.extractValue("defaults.repositories.url.allowed_urls", clusterSettings); + for (String allowedUrl : allowedUrls) { + try { + InetAddress inetAddress = InetAddress.getByName(new URL(allowedUrl).getHost()); + if (inetAddress.isAnyLocalAddress() || inetAddress.isLoopbackAddress()) { + Response createUrlRepositoryResponse = client().performRequest("PUT", "_snapshot/repository-url", emptyMap(), + buildRepositorySettings(URLRepository.TYPE, Settings.builder().put("url", allowedUrl).build())); + assertThat(createUrlRepositoryResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + break; + } + } catch (Exception e) { + logger.debug("Failed to resolve inet address for allowed URL [{}], skipping", allowedUrl); + } + } + } + + private static HttpEntity buildRepositorySettings(final String type, final Settings settings) throws IOException { + try (XContentBuilder builder = jsonBuilder()) { + builder.startObject(); + { + builder.field("type", type); + builder.startObject("settings"); + { + settings.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + builder.endObject(); + } + builder.endObject(); + return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + } + } } diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java new file mode 100644 index 0000000000000..c9a36ec859021 --- /dev/null +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLFixture.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.url; + +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; + +/** + * This {@link URLFixture} exposes a filesystem directory over HTTP. It is used in repository-url + * integration tests to expose a directory created by a regular FS repository. + */ +public class URLFixture { + + public static void main(String[] args) throws Exception { + if (args == null || args.length != 2) { + throw new IllegalArgumentException("URLFixture "); + } + + final InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + final HttpServer httpServer = MockHttpServer.createHttp(socketAddress, 0); + + try { + final Path workingDirectory = dir(args[0]); + /// Writes the PID of the current Java process in a `pid` file located in the working directory + writeFile(workingDirectory, "pid", ManagementFactory.getRuntimeMXBean().getName().split("@")[0]); + + final String addressAndPort = addressToString(httpServer.getAddress()); + // Writes the address and port of the http server in a `ports` file located in the working directory + writeFile(workingDirectory, "ports", addressAndPort); + + // Exposes the repository over HTTP + final String url = "http://" + addressAndPort; + httpServer.createContext("/", new ResponseHandler(dir(args[1]))); + httpServer.start(); + + // Wait to be killed + Thread.sleep(Long.MAX_VALUE); + + } finally { + httpServer.stop(0); + } + } + + @SuppressForbidden(reason = "Paths#get is fine - we don't have environment here") + private static Path dir(final String dir) { + return Paths.get(dir); + } + + private static void writeFile(final Path dir, final String fileName, final String content) throws IOException { + final Path tempPidFile = Files.createTempFile(dir, null, null); + Files.write(tempPidFile, singleton(content)); + Files.move(tempPidFile, dir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE); + } + + private static String addressToString(final SocketAddress address) { + final InetSocketAddress inetSocketAddress = (InetSocketAddress) address; + if (inetSocketAddress.getAddress() instanceof Inet6Address) { + return "[" + 
inetSocketAddress.getHostString() + "]:" + inetSocketAddress.getPort(); + } else { + return inetSocketAddress.getHostString() + ":" + inetSocketAddress.getPort(); + } + } + + static class ResponseHandler implements HttpHandler { + + private final Path repositoryDir; + + ResponseHandler(final Path repositoryDir) { + this.repositoryDir = repositoryDir; + } + + @Override + public void handle(HttpExchange exchange) throws IOException { + Response response; + if ("GET".equalsIgnoreCase(exchange.getRequestMethod())) { + String path = exchange.getRequestURI().toString(); + if (path.length() > 0 && path.charAt(0) == '/') { + path = path.substring(1); + } + + Path normalizedRepositoryDir = repositoryDir.normalize(); + Path normalizedPath = normalizedRepositoryDir.resolve(path).normalize(); + + if (normalizedPath.startsWith(normalizedRepositoryDir)) { + if (Files.exists(normalizedPath) && Files.isReadable(normalizedPath) && Files.isRegularFile(normalizedPath)) { + byte[] content = Files.readAllBytes(normalizedPath); + Map headers = singletonMap("Content-Length", String.valueOf(content.length)); + response = new Response(RestStatus.OK, headers, "application/octet-stream", content); + } else { + response = new Response(RestStatus.NOT_FOUND, emptyMap(), "text/plain", new byte[0]); + } + } else { + response = new Response(RestStatus.FORBIDDEN, emptyMap(), "text/plain", new byte[0]); + } + } else { + response = new Response(RestStatus.INTERNAL_SERVER_ERROR, emptyMap(), "text/plain", + "Unsupported HTTP method".getBytes(StandardCharsets.UTF_8)); + } + exchange.sendResponseHeaders(response.status.getStatus(), response.body.length); + if (response.body.length > 0) { + exchange.getResponseBody().write(response.body); + } + exchange.close(); + } + } + + /** + * Represents a HTTP Response. + */ + static class Response { + + final RestStatus status; + final Map headers; + final String contentType; + final byte[] body; + + Response(final RestStatus status, final Map headers, final String contentType, final byte[] body) { + this.status = Objects.requireNonNull(status); + this.headers = Objects.requireNonNull(headers); + this.contentType = Objects.requireNonNull(contentType); + this.body = Objects.requireNonNull(body); + } + } +} diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml index 75e7873299869..7edbc4c08fbf7 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/10_basic.yml @@ -1,6 +1,108 @@ -# Integration tests for URL Repository component +# Integration tests for repository-url # -"URL Repository plugin loaded": +# This test is based on 3 repositories, all registered before this +# test is executed. The repository-fs is used to create snapshots +# in a shared directory on the filesystem. Then the test uses a URL +# repository with a "http://" prefix to test the restore of the +# snapshots. In order to do that it uses a URLFixture that exposes +# the content of the shared directory over HTTP. A second URL +# repository is used to test the snapshot restore but this time +# with a "file://" prefix. 
+setup: + + # Ensure that the FS repository is registered, so we can create + # snapshots that we later restore using the URL repository + - do: + snapshot.get_repository: + repository: repository-fs + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 1 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 2 + - snapshot: one + - index: + _index: docs + _type: doc + _id: 3 + - snapshot: one + + # Create a first snapshot using the FS repository + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot-one + wait_for_completion: true + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _type: doc + _id: 4 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 5 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 6 + - snapshot: two + - index: + _index: docs + _type: doc + _id: 7 + - snapshot: two + + # Create a second snapshot + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot-two + wait_for_completion: true + + - do: + snapshot.get: + repository: repository-fs + snapshot: snapshot-one,snapshot-two + +--- +teardown: + + - do: + indices.delete: + index: docs + ignore_unavailable: true + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot-one + +--- +"Module repository-url is loaded": - do: cluster.state: {} @@ -10,23 +112,129 @@ - do: nodes.info: {} - - match: { nodes.$master.modules.0.name: repository-url } + - match: { nodes.$master.modules.0.name: repository-url } --- -setup: +"Restore with repository-url using http://": + # Ensure that the URL repository is registered - do: - snapshot.create_repository: - repository: test_repo1 - body: - type: url - settings: - url: "http://snapshot.test1" + snapshot.get_repository: + repository: repository-url + + - match: { repository-url.type : "url" } + - match: { repository-url.settings.url: '/http://(.+):\d+/' } - do: - snapshot.create_repository: - repository: test_repo2 - body: - type: url - settings: - url: "http://snapshot.test2" + snapshot.get: + repository: repository-url + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository-url + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository-url + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + - do: + catch: /cannot delete snapshot from a readonly repository/ + snapshot.delete: + repository: repository-url + snapshot: snapshot-two + +--- +"Restore with repository-url using file://": + + # Ensure that the URL repository is registered + - do: + snapshot.get_repository: + repository: repository-file + + - match: { repository-file.type : "url" } + - match: { repository-file.settings.url: '/file://(.+)/' } + + - do: + snapshot.get: + repository: repository-file + snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : 
SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository-file + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository-file + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + - do: + catch: /cannot delete snapshot from a readonly repository/ + snapshot.delete: + repository: repository-file + snapshot: snapshot-one + diff --git a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml index 39cfeee192c9b..77bdac3ac1573 100644 --- a/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml +++ b/modules/repository-url/src/test/resources/rest-api-spec/test/repository_url/20_repository.yml @@ -14,3 +14,18 @@ repository: test_repo1 - is_true : test_repo1 + +--- +"Repository cannot be registered": + + - do: + catch: /doesn't match any of the locations specified by path.repo or repositories.url.allowed_urls/ + snapshot.create_repository: + repository: test_repo2 + body: + type: url + settings: + url: "http://snapshot.unknown" + + + From 644e5ea97a7781cf5989015d9410dfa5493c815d Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 4 Apr 2018 17:29:09 +0200 Subject: [PATCH 66/68] Fixed quote_field_suffix in query_string (#29332) This change fixes the handling of the `quote_field_suffix` option on `query_string` query. The expansion was not applied to the default fields query. Closes #29324 --- .../index/search/QueryStringQueryParser.java | 3 ++ .../query/QueryStringQueryBuilderTests.java | 31 +++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 0612853cd502f..398f2240a5c43 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -66,6 +66,7 @@ import static org.elasticsearch.common.lucene.search.Queries.newLenientFieldQuery; import static org.elasticsearch.common.lucene.search.Queries.newUnmappedFieldQuery; import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingField; +import static org.elasticsearch.index.search.QueryParserHelper.resolveMappingFields; /** * A {@link XQueryParser} that uses the {@link MapperService} in order to build smarter @@ -264,6 +265,8 @@ private Map extractMultiFields(String field, boolean quoted) { // Filters unsupported fields if a pattern is requested // Filters metadata fields if all fields are requested return resolveMappingField(context, field, 1.0f, !allFields, !multiFields, quoted ? 
quoteFieldSuffix : null); + } else if (quoted && quoteFieldSuffix != null) { + return resolveMappingFields(context, fieldsAndWeights, quoteFieldSuffix); } else { return fieldsAndWeights; } diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 939f1add0094f..aba7836a5a325 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -1040,6 +1040,37 @@ public void testQuoteAnalyzer() throws Exception { assertEquals(expectedQuery, query); } + public void testQuoteFieldSuffix() throws IOException { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext context = createShardContext(); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME_2, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix("_2") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + + // Now check what happens if the quote field does not exist + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("bar") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + assertEquals(new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new QueryStringQueryBuilder("\"bar\"") + .quoteFieldSuffix(".quote") + .field(STRING_FIELD_NAME) + .doToQuery(context) + ); + } + public void testToFuzzyQuery() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); From e2d771e319b6cb1401321c720cf2a88b89d8316c Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 4 Apr 2018 22:09:26 +0200 Subject: [PATCH 67/68] Disable failing query in QueryBuilderBWCIT. 
Relates #29376 --- .../test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java index f3e03f006c5aa..7014b5b5e6420 100644 --- a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java +++ b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java @@ -100,12 +100,13 @@ public class QueryBuilderBWCIT extends ESRestTestCase { new MatchPhraseQueryBuilder("keyword_field", "value").slop(3) ); addCandidate("\"range\": { \"long_field\": {\"gte\": 1, \"lte\": 9}}", new RangeQueryBuilder("long_field").from(1).to(9)); - addCandidate( + // bug url https://github.com/elastic/elasticsearch/issues/29376 + /*addCandidate( "\"bool\": { \"must_not\": [{\"match_all\": {}}], \"must\": [{\"match_all\": {}}], " + "\"filter\": [{\"match_all\": {}}], \"should\": [{\"match_all\": {}}]}", new BoolQueryBuilder().mustNot(new MatchAllQueryBuilder()).must(new MatchAllQueryBuilder()) .filter(new MatchAllQueryBuilder()).should(new MatchAllQueryBuilder()) - ); + );*/ addCandidate( "\"dis_max\": {\"queries\": [{\"match_all\": {}},{\"match_all\": {}},{\"match_all\": {}}], \"tie_breaker\": 0.01}", new DisMaxQueryBuilder().add(new MatchAllQueryBuilder()).add(new MatchAllQueryBuilder()).add(new MatchAllQueryBuilder()) From 2c20f7a16470f68dff2cd0bd8ca68fd84af5aff4 Mon Sep 17 00:00:00 2001 From: Igor Motov Date: Wed, 4 Apr 2018 17:39:30 -0400 Subject: [PATCH 68/68] Allow using distance measure in the geo context precision (#29273) Adds support for distance measure, such as "4km", "5m" in the precision field of the geo location context in context suggesters. Fixes #24807 --- .../elasticsearch/common/geo/GeoUtils.java | 47 +++++++++++- .../geogrid/GeoGridAggregationBuilder.java | 29 ++------ .../bucket/geogrid/GeoHashGridParams.java | 9 --- .../completion/context/GeoQueryContext.java | 9 +-- .../common/geo/GeoUtilTests.java | 71 +++++++++++++++++++ .../completion/GeoQueryContextTests.java | 37 ++++++++++ 6 files changed, 164 insertions(+), 38 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index 655b259c81074..ce0098ea9722f 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -24,10 +24,10 @@ import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.util.SloppyMath; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; import org.elasticsearch.index.fielddata.MultiGeoPointValues; @@ -459,6 +459,51 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } + /** + * Parse a precision that can be expressed as an integer or a distance measure like "1km", "10m". 
+ * + * The precision is expressed as a number between 1 and 12 and indicates the length of geohash + * used to represent geo points. + * + * @param parser {@link XContentParser} to parse the value from + * @return int representing precision + */ + public static int parsePrecision(XContentParser parser) throws IOException, ElasticsearchParseException { + XContentParser.Token token = parser.currentToken(); + if (token.equals(XContentParser.Token.VALUE_NUMBER)) { + return XContentMapValues.nodeIntegerValue(parser.intValue()); + } else { + String precision = parser.text(); + try { + // we want to treat simple integer strings as precision levels, not distances + return XContentMapValues.nodeIntegerValue(precision); + } catch (NumberFormatException e) { + // try to parse as a distance value + final int parsedPrecision = GeoUtils.geoHashLevelsForPrecision(precision); + try { + return checkPrecisionRange(parsedPrecision); + } catch (IllegalArgumentException e2) { + // this happens when distance too small, so precision > 12. We'd like to see the original string + throw new IllegalArgumentException("precision too high [" + precision + "]", e2); + } + } + } + } + + /** + * Checks that the precision is within range supported by elasticsearch - between 1 and 12 + * + * Returns the precision value if it is in the range and throws an IllegalArgumentException if it + * is outside the range. + */ + public static int checkPrecisionRange(int precision) { + if ((precision < 1) || (precision > 12)) { + throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision + + ". Must be between 1 and 12."); + } + return precision; + } + /** Returns the maximum distance/radius (in meters) from the point 'center' before overlapping */ public static double maxRadialDistanceMeters(final double centerLat, final double centerLon) { if (Math.abs(centerLat) == MAX_LAT) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index f91dde8877093..2f66531834d38 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -54,6 +54,8 @@ import java.util.Map; import java.util.Objects; +import static org.elasticsearch.common.geo.GeoUtils.parsePrecision; + public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder implements MultiBucketAggregationBuilder { public static final String NAME = "geohash_grid"; @@ -64,29 +66,8 @@ public class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder(GeoGridAggregationBuilder.NAME); ValuesSourceParserHelper.declareGeoFields(PARSER, false, false); - PARSER.declareField((parser, builder, context) -> { - XContentParser.Token token = parser.currentToken(); - if (token.equals(XContentParser.Token.VALUE_NUMBER)) { - builder.precision(XContentMapValues.nodeIntegerValue(parser.intValue())); - } else { - String precision = parser.text(); - try { - // we want to treat simple integer strings as precision levels, not distances - builder.precision(XContentMapValues.nodeIntegerValue(Integer.parseInt(precision))); - } catch (NumberFormatException e) { - // try to parse as a distance value - try { - builder.precision(GeoUtils.geoHashLevelsForPrecision(precision)); - } catch (NumberFormatException e2) { - // can 
happen when distance unit is unknown, in this case we simply want to know the reason - throw e2; - } catch (IllegalArgumentException e3) { - // this happens when distance too small, so precision > 12. We'd like to see the original string - throw new IllegalArgumentException("precision too high [" + precision + "]", e3); - } - } - } - }, GeoHashGridParams.FIELD_PRECISION, org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); + PARSER.declareField((parser, builder, context) -> builder.precision(parsePrecision(parser)), GeoHashGridParams.FIELD_PRECISION, + org.elasticsearch.common.xcontent.ObjectParser.ValueType.INT); PARSER.declareInt(GeoGridAggregationBuilder::size, GeoHashGridParams.FIELD_SIZE); PARSER.declareInt(GeoGridAggregationBuilder::shardSize, GeoHashGridParams.FIELD_SHARD_SIZE); } @@ -133,7 +114,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } public GeoGridAggregationBuilder precision(int precision) { - this.precision = GeoHashGridParams.checkPrecision(precision); + this.precision = GeoUtils.checkPrecisionRange(precision); return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java index e4b8d753c4018..ff3b21a3a7bae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParams.java @@ -30,15 +30,6 @@ final class GeoHashGridParams { static final ParseField FIELD_SIZE = new ParseField("size"); static final ParseField FIELD_SHARD_SIZE = new ParseField("shard_size"); - - static int checkPrecision(int precision) { - if ((precision < 1) || (precision > 12)) { - throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision - + ". 
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java
index 151dcc9173f28..259446cb0c1df 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/context/GeoQueryContext.java
@@ -33,6 +33,7 @@
 import java.util.List;
 import java.util.Objects;
 
+import static org.elasticsearch.common.geo.GeoUtils.parsePrecision;
 import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_BOOST;
 import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_NEIGHBOURS;
 import static org.elasticsearch.search.suggest.completion.context.GeoContextMapping.CONTEXT_PRECISION;
@@ -115,10 +116,10 @@ public static Builder builder() {
     static {
         GEO_CONTEXT_PARSER.declareField((parser, geoQueryContext, geoContextMapping) -> geoQueryContext.setGeoPoint(GeoUtils.parseGeoPoint(parser)), new ParseField(CONTEXT_VALUE), ObjectParser.ValueType.OBJECT);
         GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setBoost, new ParseField(CONTEXT_BOOST));
-        // TODO : add string support for precision for GeoUtils.geoHashLevelsForPrecision()
-        GEO_CONTEXT_PARSER.declareInt(GeoQueryContext.Builder::setPrecision, new ParseField(CONTEXT_PRECISION));
-        // TODO : add string array support for precision for GeoUtils.geoHashLevelsForPrecision()
-        GEO_CONTEXT_PARSER.declareIntArray(GeoQueryContext.Builder::setNeighbours, new ParseField(CONTEXT_NEIGHBOURS));
+        GEO_CONTEXT_PARSER.declareField((parser, builder, context) -> builder.setPrecision(parsePrecision(parser)),
+            new ParseField(CONTEXT_PRECISION), ObjectParser.ValueType.INT);
+        GEO_CONTEXT_PARSER.declareFieldArray(GeoQueryContext.Builder::setNeighbours, (parser, builder) -> parsePrecision(parser),
+            new ParseField(CONTEXT_NEIGHBOURS), ObjectParser.ValueType.INT_ARRAY);
         GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLat, new ParseField("lat"));
         GEO_CONTEXT_PARSER.declareDouble(GeoQueryContext.Builder::setLon, new ParseField("lon"));
     }
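The user-facing effect on completion suggester geo contexts, sketched with the values used by testStringPrecision below (the enclosing suggest request is omitted; only the context object parsed by GEO_CONTEXT_PARSER is shown):

    // Previously declareInt/declareIntArray accepted only integer levels; both of
    // these context payloads now parse through GeoUtils.parsePrecision:
    //   { "context": { "lat": 23.654242, "lon": 90.047153 }, "boost": 10,
    //     "precision": 12, "neighbours": [1, 2] }
    //   { "context": { "lat": 23.654242, "lon": 90.047153 }, "boost": 10,
    //     "precision": "12m", "neighbours": ["4km", "10km"] }
    // The string form resolves to precision 9 and neighbours [6, 5].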
diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java
new file mode 100644
index 0000000000000..efec56e788da1
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/common/geo/GeoUtilTests.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.geo;
+
+import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+
+public class GeoUtilTests extends ESTestCase {
+
+    public void testPrecisionParser() throws IOException {
+        assertEquals(10, parsePrecision(builder -> builder.field("test", 10)));
+        assertEquals(10, parsePrecision(builder -> builder.field("test", 10.2)));
+        assertEquals(6, parsePrecision(builder -> builder.field("test", "6")));
+        assertEquals(7, parsePrecision(builder -> builder.field("test", "1km")));
+        assertEquals(7, parsePrecision(builder -> builder.field("test", "1.1km")));
+    }
+
+    public void testIncorrectPrecisionParser() {
+        expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "10.1.1.1")));
+        expectThrows(NumberFormatException.class, () -> parsePrecision(builder -> builder.field("test", "364.4smoots")));
+        assertEquals(
+            "precision too high [0.01mm]",
+            expectThrows(IllegalArgumentException.class, () -> parsePrecision(builder -> builder.field("test", "0.01mm"))).getMessage()
+        );
+    }
+
+    /**
+     * Invokes the GeoUtils.parsePrecision parser on the value generated by the given tokenGenerator.
+     *
+     * The supplied tokenGenerator should generate a single field that contains the precision in
+     * one of the supported formats, or a malformed precision value if error handling is being tested.
+     * The method returns the parsed value, or throws an exception if the precision value is malformed.
+     */
+    private int parsePrecision(CheckedConsumer<XContentBuilder, IOException> tokenGenerator) throws IOException {
+        XContentBuilder builder = jsonBuilder().startObject();
+        tokenGenerator.accept(builder);
+        builder.endObject();
+        XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
+        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); // {
+        assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());   // field name
+        assertTrue(parser.nextToken().isValue());                            // field value
+        int precision = GeoUtils.parsePrecision(parser);
+        assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());   // }
+        assertNull(parser.nextToken());                                      // no more tokens
+        return precision;
+    }
+}
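Collecting the distance-to-level conversions that the tests in this patch pin down (performed by GeoUtils.geoHashLevelsForPrecision; see GeoUtilTests above and GeoQueryContextTests below):

    // "10km" -> 5    "4km" -> 6    "1km" -> 7    "1.1km" -> 7    "12m" -> 9
    // "0.01mm" would require a level greater than 12 and is rejected by
    // checkPrecisionRange with "precision too high [0.01mm]"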
diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java
index 1d058350a98a5..7764f269a03b3 100644
--- a/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java
+++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/GeoQueryContextTests.java
@@ -19,15 +19,20 @@
 package org.elasticsearch.search.suggest.completion;
 
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.geo.GeoPoint;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.search.suggest.completion.context.GeoQueryContext;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
 
 public class GeoQueryContextTests extends QueryContextTestCase<GeoQueryContext> {
@@ -105,4 +110,36 @@ public void testIllegalArguments() {
             assertEquals(e.getMessage(), "neighbour value must be between 1 and 12");
         }
     }
+
+    public void testStringPrecision() throws IOException {
+        XContentBuilder builder = jsonBuilder().startObject();
+        {
+            builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject();
+            builder.field("boost", 10);
+            builder.field("precision", 12);
+            builder.array("neighbours", 1, 2);
+        }
+        builder.endObject();
+        XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
+        parser.nextToken();
+        GeoQueryContext queryContext = fromXContent(parser);
+        assertEquals(10, queryContext.getBoost());
+        assertEquals(12, queryContext.getPrecision());
+        assertEquals(Arrays.asList(1, 2), queryContext.getNeighbours());
+
+        builder = jsonBuilder().startObject();
+        {
+            builder.startObject("context").field("lat", 23.654242).field("lon", 90.047153).endObject();
+            builder.field("boost", 10);
+            builder.field("precision", "12m");
+            builder.array("neighbours", "4km", "10km");
+        }
+        builder.endObject();
+        parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder));
+        parser.nextToken();
+        queryContext = fromXContent(parser);
+        assertEquals(10, queryContext.getBoost());
+        assertEquals(9, queryContext.getPrecision());
+        assertEquals(Arrays.asList(6, 5), queryContext.getNeighbours());
+    }
 }
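For completeness, a hedged sketch of the equivalent programmatic construction; the builder methods shown here are the ones wired into GEO_CONTEXT_PARSER above, and the integer values are what the string forms in testStringPrecision parse to:

    // Distance strings are resolved to geohash levels at parse time, so the
    // builder itself still takes integer levels.
    GeoQueryContext context = GeoQueryContext.builder()
        .setGeoPoint(new GeoPoint(23.654242, 90.047153))
        .setBoost(10)
        .setPrecision(9)                    // what "12m" resolves to
        .setNeighbours(Arrays.asList(6, 5)) // what ["4km", "10km"] resolve to
        .build();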