diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index 25ad5bcf89581..7d9537feaea56 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -15,6 +15,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.compressors.bzip2.BZip2CompressorOutputStream; import org.apache.commons.io.IOUtils; +import org.elasticsearch.gradle.OS; import org.elasticsearch.gradle.util.GradleUtils; import org.gradle.api.Plugin; import org.gradle.api.Project; @@ -61,7 +62,7 @@ public void apply(Project target) { ? System.getenv("BUILD_NUMBER") : System.getenv("BUILDKITE_BUILD_NUMBER"); String performanceTest = System.getenv("BUILD_PERFORMANCE_TEST"); - if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false) { + if (buildNumber != null && performanceTest == null && GradleUtils.isIncludedBuild(target) == false && OS.current() != OS.WINDOWS) { File targetFile = calculateTargetFile(target, buildNumber); File projectDir = target.getProjectDir(); File gradleWorkersDir = new File(target.getGradle().getGradleUserHomeDir(), "workers/"); diff --git a/docs/changelog/114819.yaml b/docs/changelog/114819.yaml new file mode 100644 index 0000000000000..f8d03f7024801 --- /dev/null +++ b/docs/changelog/114819.yaml @@ -0,0 +1,6 @@ +pr: 114819 +summary: Don't use a `BytesStreamOutput` to copy keys in `BytesRefBlockHash` +area: EQL +type: bug +issues: + - 114599 diff --git a/docs/changelog/114951.yaml b/docs/changelog/114951.yaml new file mode 100644 index 0000000000000..4d40a063e2b02 --- /dev/null +++ b/docs/changelog/114951.yaml @@ -0,0 +1,5 @@ +pr: 114951 +summary: Expose cluster-state role mappings in APIs +area: Authentication +type: bug +issues: [] diff --git a/docs/changelog/115041.yaml b/docs/changelog/115041.yaml new file mode 100644 index 0000000000000..f4c047c1569ec --- /dev/null +++ b/docs/changelog/115041.yaml @@ -0,0 +1,6 @@ +pr: 115041 +summary: Increase default `queue_capacity` to 10_000 and decrease max `queue_capacity` + to 100_000 +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/115102.yaml b/docs/changelog/115102.yaml new file mode 100644 index 0000000000000..f679bb6c223a6 --- /dev/null +++ b/docs/changelog/115102.yaml @@ -0,0 +1,6 @@ +pr: 115102 +summary: Watch Next Run Interval Resets On Shard Move or Node Restart +area: Watcher +type: bug +issues: + - 111433 diff --git a/docs/changelog/115241.yaml b/docs/changelog/115241.yaml new file mode 100644 index 0000000000000..b7119d7f6aaeb --- /dev/null +++ b/docs/changelog/115241.yaml @@ -0,0 +1,6 @@ +pr: 115241 +summary: "[Security Solution] Add `create_index` to `kibana_system` role for index/DS\ + \ `.logs-endpoint.action.responses-*`" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/115308.yaml b/docs/changelog/115308.yaml new file mode 100644 index 0000000000000..163f0232a3e58 --- /dev/null +++ b/docs/changelog/115308.yaml @@ -0,0 +1,6 @@ +pr: 115308 +summary: "ESQL: Disable pushdown of WHERE past STATS" +area: ES|QL +type: bug +issues: + - 115281 diff --git a/docs/changelog/115312.yaml b/docs/changelog/115312.yaml new file mode 100644 index 
0000000000000..acf6bbc69c36c --- /dev/null +++ b/docs/changelog/115312.yaml @@ -0,0 +1,6 @@ +pr: 115312 +summary: "ESQL: Fix filtered grouping on ords" +area: ES|QL +type: bug +issues: + - 114897 diff --git a/docs/changelog/115317.yaml b/docs/changelog/115317.yaml new file mode 100644 index 0000000000000..153f7a52f0674 --- /dev/null +++ b/docs/changelog/115317.yaml @@ -0,0 +1,5 @@ +pr: 115317 +summary: Revert "Add `ResolvedExpression` wrapper" +area: Indices APIs +type: bug +issues: [] diff --git a/docs/changelog/115359.yaml b/docs/changelog/115359.yaml new file mode 100644 index 0000000000000..65b3086dfc8d0 --- /dev/null +++ b/docs/changelog/115359.yaml @@ -0,0 +1,6 @@ +pr: 115359 +summary: Adding support for simulate ingest mapping addition for indices with mappings + that do not come from templates +area: Ingest Node +type: enhancement +issues: [] diff --git a/docs/changelog/115404.yaml b/docs/changelog/115404.yaml new file mode 100644 index 0000000000000..e443b152955f3 --- /dev/null +++ b/docs/changelog/115404.yaml @@ -0,0 +1,5 @@ +pr: 115404 +summary: Fix NPE in Get Deployment Stats +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/115414.yaml b/docs/changelog/115414.yaml new file mode 100644 index 0000000000000..7475b765bb30e --- /dev/null +++ b/docs/changelog/115414.yaml @@ -0,0 +1,9 @@ +pr: 115414 +summary: Mitigate IOSession timeouts +area: Machine Learning +type: bug +issues: + - 114385 + - 114327 + - 114105 + - 114232 diff --git a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json index bafbcf2bc2038..07ffe84444f02 100644 --- a/docs/reference/esql/functions/kibana/definition/to_date_nanos.json +++ b/docs/reference/esql/functions/kibana/definition/to_date_nanos.json @@ -5,5 +5,6 @@ "description" : "Converts an input to a nanosecond-resolution date value (aka date_nanos).", "note" : "The range for date nanos is 1970-01-01T00:00:00.000000000Z to 2262-04-11T23:47:16.854775807Z.
Additionally, integers cannot be converted into date nanos, as the range of integer nanoseconds only covers about 2 seconds after epoch.", "signatures" : [ ], - "preview" : true + "preview" : true, + "snapshot_only" : false } diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 7e207146e38e3..18052cfb64e8f 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -6,7 +6,7 @@ include::links.asciidoc[] include::landing-page.asciidoc[] -include::release-notes/highlights.asciidoc[] +// overview / install include::intro.asciidoc[] @@ -14,33 +14,37 @@ include::quickstart/index.asciidoc[] include::setup.asciidoc[] -include::upgrade.asciidoc[] +// search solution -include::index-modules.asciidoc[] +include::search/search-your-data/search-your-data.asciidoc[] -include::mapping.asciidoc[] +include::reranking/index.asciidoc[] -include::analysis.asciidoc[] +// data management + +include::index-modules.asciidoc[] include::indices/index-templates.asciidoc[] -include::data-streams/data-streams.asciidoc[] +include::alias.asciidoc[] -include::ingest.asciidoc[] +include::mapping.asciidoc[] -include::alias.asciidoc[] +include::analysis.asciidoc[] -include::search/search-your-data/search-your-data.asciidoc[] +include::ingest.asciidoc[] -include::reranking/index.asciidoc[] +include::connector/docs/index.asciidoc[] -include::query-dsl.asciidoc[] +include::data-streams/data-streams.asciidoc[] -include::aggregations.asciidoc[] +include::data-management.asciidoc[] -include::geospatial-analysis.asciidoc[] +include::data-rollup-transform.asciidoc[] -include::connector/docs/index.asciidoc[] +// analysis tools + +include::query-dsl.asciidoc[] include::eql/eql.asciidoc[] @@ -50,34 +54,48 @@ include::sql/index.asciidoc[] include::scripting.asciidoc[] -include::data-management.asciidoc[] +include::aggregations.asciidoc[] -include::autoscaling/index.asciidoc[] +include::geospatial-analysis.asciidoc[] + +include::watcher/index.asciidoc[] + +// cluster management include::monitoring/index.asciidoc[] -include::data-rollup-transform.asciidoc[] +include::security/index.asciidoc[] + +// production tasks include::high-availability.asciidoc[] +include::how-to.asciidoc[] + +include::autoscaling/index.asciidoc[] + include::snapshot-restore/index.asciidoc[] -include::security/index.asciidoc[] +// reference -include::watcher/index.asciidoc[] +include::rest-api/index.asciidoc[] include::commands/index.asciidoc[] -include::how-to.asciidoc[] - include::troubleshooting.asciidoc[] -include::rest-api/index.asciidoc[] +// upgrades + +include::upgrade.asciidoc[] include::migration/index.asciidoc[] +include::release-notes/highlights.asciidoc[] + include::release-notes.asciidoc[] include::dependencies-versions.asciidoc[] +// etc + include::redirects.asciidoc[] \ No newline at end of file diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index 5733a28eb711a..81ba44c954e0a 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -68,8 +68,16 @@ Synthetic source may sort `binary` values in order of their byte representation. 
---- PUT idx { + "settings": { + "index": { + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + }, "mappings": { - "_source": { "mode": "synthetic" }, "properties": { "binary": { "type": "binary", "doc_values": true } } diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 81d46b5773877..c3f6fb43f2ffd 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,5 +1,6 @@ +[chapter] [[release-highlights]] -== What's new in {minor-version} += What's new in {minor-version} coming::[{minor-version}] @@ -37,7 +38,7 @@ endif::[] [discrete] [[esql_inlinestats]] -=== ESQL: INLINESTATS +== ESQL: INLINESTATS This adds the `INLINESTATS` command to ESQL which performs a STATS and then enriches the results into the output stream. So, this query: @@ -62,7 +63,7 @@ Produces output like: [discrete] [[always_allow_rebalancing_by_default]] -=== Always allow rebalancing by default +== Always allow rebalancing by default In earlier versions of {es} the `cluster.routing.allocation.allow_rebalance` setting defaults to `indices_all_active` which blocks all rebalancing moves while the cluster is in `yellow` or `red` health. This was appropriate for the legacy allocator which might do too many rebalancing moves otherwise. Today's allocator has @@ -74,7 +75,7 @@ version 8.16 `allow_rebalance` setting defaults to `always` unless the legacy al [discrete] [[add_global_retention_in_data_stream_lifecycle]] -=== Add global retention in data stream lifecycle +== Add global retention in data stream lifecycle Data stream lifecycle now supports configuring retention on a cluster level, namely global retention. Global retention \nallows us to configure two different retentions: @@ -88,7 +89,7 @@ data stream lifecycle and it allows any data stream \ndata to be deleted after t [discrete] [[enable_zstandard_compression_for_indices_with_index_codec_set_to_best_compression]] -=== Enable ZStandard compression for indices with index.codec set to best_compression +== Enable ZStandard compression for indices with index.codec set to best_compression Before DEFLATE compression was used to compress stored fields in indices with index.codec index setting set to best_compression, with this change ZStandard is used as compression algorithm to stored fields for indices with index.codec index setting set to best_compression. The usage ZStandard results in less storage usage with a diff --git a/docs/reference/watcher/how-watcher-works.asciidoc b/docs/reference/watcher/how-watcher-works.asciidoc index ed6e49b72e9ce..e34d4f799d99b 100644 --- a/docs/reference/watcher/how-watcher-works.asciidoc +++ b/docs/reference/watcher/how-watcher-works.asciidoc @@ -146,15 +146,18 @@ add, the more distributed the watches can be executed. If you add or remove replicas, all watches need to be reloaded. If a shard is relocated, the primary and all replicas of this particular shard will reload. -Because the watches are executed on the node, where the watch shards are, you can create -dedicated watcher nodes by using shard allocation filtering. +Because the watches are executed on the node where the watch shards are, you +can create dedicated watcher nodes by using shard allocation filtering. To do this, +configure nodes with a dedicated `node.attr.role: watcher` property.
-You could configure nodes with a dedicated `node.attr.role: watcher` property and -then configure the `.watches` index like this: +As the `.watches` index is a system index, you can't use the normal `.watches/_settings` +endpoint to modify its routing allocation. Instead, you can use the following dedicated +endpoint to adjust the allocation of the `.watches` shards to the nodes with the +`watcher` role attribute: [source,console] ------------------------ -PUT .watches/_settings +PUT _watcher/settings { "index.routing.allocation.include.role": "watcher" } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index a2557a4de6e6d..29ec326548f2b 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.IndexDocFailureStoreStatus; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; @@ -170,7 +171,7 @@ public void testTimeRanges() throws Exception { var indexRequest = new IndexRequest("k8s").opType(DocWriteRequest.OpType.CREATE); time = randomBoolean() ? endTime : endTime.plusSeconds(randomIntBetween(1, 99)); indexRequest.source(DOC.replace("$time", formatInstant(time)), XContentType.JSON); - expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + expectThrows(IndexDocFailureStoreStatus.ExceptionWithFailureStoreStatus.class, () -> client().index(indexRequest).actionGet()); } // Fetch UpdateTimeSeriesRangeService and increment time range of latest backing index: diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java index ab7e590b1631e..f60a3e5c47a7f 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamFeatures.java @@ -26,6 +26,7 @@ public class DataStreamFeatures implements FeatureSpecification { public static final NodeFeature DATA_STREAM_LIFECYCLE = new NodeFeature("data_stream.lifecycle"); + public static final NodeFeature DATA_STREAM_FAILURE_STORE_TSDB_FIX = new NodeFeature("data_stream.failure_store.tsdb_fix"); @Override public Map<NodeFeature, Version> getHistoricalFeatures() { @@ -41,4 +42,9 @@ public Set<NodeFeature> getFeatures() { DataStreamGlobalRetention.GLOBAL_RETENTION // Added in 8.14 ); } + + @Override + public Set<NodeFeature> getTestFeatures() { + return Set.of(DATA_STREAM_FAILURE_STORE_TSDB_FIX); + } } diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml index 56f387c016261..de5cf3baa744e 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml +++
b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/150_tsdb.yml @@ -182,6 +182,107 @@ index without timestamp: body: - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' +--- +TSDB failures go to failure store: + - requires: + cluster_features: ["data_stream.failure_store.tsdb_fix"] + reason: "tests tsdb failure store fixes in 8.16.0 that catch timestamp errors that happen earlier in the process and redirect them to the failure store." + + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [fs-k8s*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [ "fs-k8s*" ] + data_stream: + failure_store: true + template: + settings: + index: + mode: time_series + number_of_replicas: 1 + number_of_shards: 2 + routing_path: [ metricset, time_series_dimension ] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + name: + type: keyword + ip: + type: ip + network: + properties: + tx: + type: long + rx: + type: long + - do: + index: + index: fs-k8s + body: + - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - match: { result : "created"} + - match: { failure_store : "used"} + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "fs-k8s"} }' + - '{"@timestamp":"2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{ "@timestamp": "2021-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "fs-k8s"} }' + - '{ "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "fs-k8s"} }' + - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - '{ "create": { "_index": "k8s"} }' + - '{ "@timestamp":"2000-04-28T01:00:00ZZ", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}}' + - is_true: errors + + # Successfully indexed to backing index + - match: { items.0.create._index: '/\.ds-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.0.create.status: 201 } + - is_false: items.0.create.failure_store + - match: { 
items.1.create._index: '/\.ds-k8s-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.1.create.status: 201 } + - is_false: items.1.create.failure_store + + # Successfully indexed to failure store + - match: { items.2.create._index: '/\.fs-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.2.create.status: 201 } + - match: { items.2.create.failure_store: used } + - match: { items.3.create._index: '/\.fs-fs-k8s-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.3.create.status: 201 } + - match: { items.3.create.failure_store: used } + + # Rejected, eligible to go to failure store, but failure store not enabled + - match: { items.4.create._index: 'k8s' } + - match: { items.4.create.status: 400 } + - match: { items.4.create.error.type: timestamp_error } + - match: { items.4.create.failure_store: not_enabled } + - match: { items.5.create._index: 'k8s' } + - match: { items.5.create.status: 400 } + - match: { items.5.create.error.type: timestamp_error } + - match: { items.5.create.failure_store: not_enabled } + --- index without timestamp with pipeline: - do: diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml index cb5578a282dc9..9b5a9dae8bc0a 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/190_failure_store_redirection.yml @@ -879,7 +879,7 @@ teardown: # Successfully indexed to backing index - match: { items.0.create._index: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } - match: { items.0.create.status: 201 } - - is_false: items.1.create.failure_store + - is_false: items.0.create.failure_store # Rejected but not eligible to go to failure store - match: { items.1.create._index: '/\.ds-logs-foobar-(\d{4}\.\d{2}\.\d{2}-)?000001/' } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index eacf2e5a2ee57..61ca050d91c13 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -43,13 +43,14 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; +import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.DATABASES_INDEX; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; @@ -238,14 +239,11 @@ public void clusterChanged(ClusterChangedEvent event) { } static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { - if (pipelineConfigurationsWithGeoIpProcessor(clusterState, true).isEmpty() == false) { + if (pipelinesWithGeoIpProcessor(clusterState, true).isEmpty() == false) { return true; } - Set<String> checkReferencedPipelines = pipelineConfigurationsWithGeoIpProcessor(clusterState, false).stream() - .map(PipelineConfiguration::getId) -
.collect(Collectors.toSet()); - + final Set<String> checkReferencedPipelines = pipelinesWithGeoIpProcessor(clusterState, false); if (checkReferencedPipelines.isEmpty()) { return false; } @@ -258,22 +256,24 @@ static boolean hasAtLeastOneGeoipProcessor(ClusterState clusterState) { } /** - * Retrieve list of pipelines that have at least one geoip processor. + * Retrieve the set of pipeline ids that have at least one geoip processor. * @param clusterState Cluster state. * @param downloadDatabaseOnPipelineCreation Filter the list to include only pipeline with the download_database_on_pipeline_creation * matching the param. - * @return A list of {@link PipelineConfiguration} matching criteria. + * @return A set of pipeline ids matching criteria. */ @SuppressWarnings("unchecked") - private static List<PipelineConfiguration> pipelineConfigurationsWithGeoIpProcessor( - ClusterState clusterState, - boolean downloadDatabaseOnPipelineCreation - ) { - List<PipelineConfiguration> pipelineDefinitions = IngestService.getPipelines(clusterState); - return pipelineDefinitions.stream().filter(pipelineConfig -> { - List<Map<String, Object>> processors = (List<Map<String, Object>>) pipelineConfig.getConfigAsMap().get(Pipeline.PROCESSORS_KEY); - return hasAtLeastOneGeoipProcessor(processors, downloadDatabaseOnPipelineCreation); - }).toList(); + private static Set<String> pipelinesWithGeoIpProcessor(ClusterState clusterState, boolean downloadDatabaseOnPipelineCreation) { + List<PipelineConfiguration> configurations = IngestService.getPipelines(clusterState); + Set<String> ids = new HashSet<>(); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (PipelineConfiguration configuration : configurations) { + List<Map<String, Object>> processors = (List<Map<String, Object>>) configuration.getConfigAsMap().get(Pipeline.PROCESSORS_KEY); + if (hasAtLeastOneGeoipProcessor(processors, downloadDatabaseOnPipelineCreation)) { + ids.add(configuration.getId()); + } + } + return Collections.unmodifiableSet(ids); } /** @@ -283,7 +283,15 @@ private static List<PipelineConfiguration> pipelineConfigurationsWithGeoIpProces * @return true if a geoip processor is found in the processor list. */ private static boolean hasAtLeastOneGeoipProcessor(List<Map<String, Object>> processors, boolean downloadDatabaseOnPipelineCreation) { - return processors != null && processors.stream().anyMatch(p -> hasAtLeastOneGeoipProcessor(p, downloadDatabaseOnPipelineCreation)); + if (processors != null) { + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (Map<String, Object> processor : processors) { + if (hasAtLeastOneGeoipProcessor(processor, downloadDatabaseOnPipelineCreation)) { + return true; + } + } + } + return false; } /** @@ -301,14 +309,14 @@ private static boolean hasAtLeastOneGeoipProcessor(Map<String, Object> processor { final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(GEOIP_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(GEOIP_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } { final Map<String, Object> processorConfig = (Map<String, Object>) processor.get(IP_LOCATION_TYPE); if (processorConfig != null) { - return downloadDatabaseOnPipelineCreation(IP_LOCATION_TYPE, processorConfig, null) == downloadDatabaseOnPipelineCreation; + return downloadDatabaseOnPipelineCreation(processorConfig) == downloadDatabaseOnPipelineCreation; } } @@ -317,7 +325,7 @@ private static boolean hasAtLeastOneGeoipProcessor(Map<String, Object> processor } /** - * Check if a processor config is has an on_failure clause containing at least a geoip processor.
+ * Check if a processor config has an on_failure clause containing at least a geoip processor. * @param processor Processor config. * @param downloadDatabaseOnPipelineCreation Should the download_database_on_pipeline_creation of the geoip processor be true or false. * @return true if a geoip processor is found in the processor list. @@ -327,16 +335,17 @@ private static boolean isProcessorWithOnFailureGeoIpProcessor( Map<String, Object> processor, boolean downloadDatabaseOnPipelineCreation ) { - return processor != null - && processor.values() - .stream() - .anyMatch( - value -> value instanceof Map - && hasAtLeastOneGeoipProcessor( - ((Map<String, List<Map<String, Object>>>) value).get("on_failure"), - downloadDatabaseOnPipelineCreation - ) - ); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + for (Object value : processor.values()) { + if (value instanceof Map + && hasAtLeastOneGeoipProcessor( + ((Map<String, List<Map<String, Object>>>) value).get("on_failure"), + downloadDatabaseOnPipelineCreation + )) { + return true; + } + } + return false; } /** diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java index 9508bf0346058..f99f8dbe2fdd0 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpProcessor.java @@ -238,9 +238,8 @@ public Processor create( boolean ignoreMissing = readBooleanProperty(type, processorTag, config, "ignore_missing", false); boolean firstOnly = readBooleanProperty(type, processorTag, config, "first_only", true); - // Validating the download_database_on_pipeline_creation even if the result - // is not used directly by the factory. - downloadDatabaseOnPipelineCreation(type, config, processorTag); + // validate (and consume) the download_database_on_pipeline_creation property even though the result is not used by the factory + readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); // noop, should be removed in 9.0 Object value = config.remove("fallback_to_default_databases"); @@ -319,8 +318,15 @@ public Processor create( ); } - public static boolean downloadDatabaseOnPipelineCreation(String type, Map<String, Object> config, String processorTag) { - return readBooleanProperty(type, processorTag, config, "download_database_on_pipeline_creation", true); + /** + * Get the value of the "download_database_on_pipeline_creation" property from a processor's config map. + * <p>
+ * As with the actual property definition, the default value of the property is 'true'. Unlike the actual + * property definition, this method doesn't consume (that is, config.remove) the property from + * the config map. + */ + public static boolean downloadDatabaseOnPipelineCreation(Map<String, Object> config) { + return (boolean) config.getOrDefault("download_database_on_pipeline_creation", true); + } } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java index cd252fcff2376..5904169308fab 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/MatchOnlyTextFieldMapper.java @@ -364,8 +364,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); // MatchOnlyText never has norms, so we have to use the field names field BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup, sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, lookup); } @Override diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java index 1f647cb977cf5..b845545133e19 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/ScaledFloatFieldMapper.java @@ -319,8 +319,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.DoublesBlockLoader(valueFetcher, lookup); } @Override diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java new file mode 100644 index 0000000000000..4a001bb2d0969 --- /dev/null +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/index/reindex/ReindexNodeShutdownIT.java @@ -0,0 +1,139 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1".
+ */ + +package org.elasticsearch.index.reindex; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.ShutdownPrepareService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.tasks.TaskInfo; +import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.node.ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; + +/** + * Test that the wait added during shutdown is necessary for a large reindexing task to complete. + * The test works as follows: + * 1. Start a large (reasonably long running) reindexing request on the coordinator-only node. + * 2. Check that the reindexing task appears on the coordinating node + * 3. With a 10s timeout value for MAXIMUM_REINDEXING_TIMEOUT_SETTING, + * wait for the reindexing task to complete before closing the node + * 4. Confirm that the reindexing task succeeds with the wait (it will fail without it) + */ +@ESIntegTestCase.ClusterScope(numDataNodes = 0, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST) +public class ReindexNodeShutdownIT extends ESIntegTestCase { + + protected static final String INDEX = "reindex-shutdown-index"; + protected static final String DEST_INDEX = "dest-index"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Arrays.asList(ReindexPlugin.class); + } + + protected ReindexRequestBuilder reindex(String nodeName) { + return new ReindexRequestBuilder(internalCluster().client(nodeName)); + } + + public void testReindexWithShutdown() throws Exception { + final String masterNodeName = internalCluster().startMasterOnlyNode(); + final String dataNodeName = internalCluster().startDataOnlyNode(); + + final Settings COORD_SETTINGS = Settings.builder() + .put(MAXIMUM_REINDEXING_TIMEOUT_SETTING.getKey(), TimeValue.timeValueSeconds(10)) + .build(); + // apply the 10s reindexing timeout to the coordinating node, as described in the class javadoc + final String coordNodeName = internalCluster().startCoordinatingOnlyNode(COORD_SETTINGS); + + ensureStableCluster(3); + + int numDocs = 20000; + createIndex(numDocs); + createReindexTaskAndShutdown(coordNodeName); + checkDestinationIndex(dataNodeName, numDocs); + } + + private void createIndex(int numDocs) { + // INDEX will be created on the dataNode + createIndex(INDEX); + + logger.debug("setting up [{}] docs", numDocs); + indexRandom( + true, + false, + true, + IntStream.range(0, numDocs) + .mapToObj(i -> prepareIndex(INDEX).setId(String.valueOf(i)).setSource("n", i)) + .collect(Collectors.toList()) + ); + + // Checks that all documents have been indexed and correctly counted + assertHitCount(prepareSearch(INDEX).setSize(0).setTrackTotalHits(true), numDocs); + } + + private void createReindexTaskAndShutdown(final String coordNodeName) throws Exception { + AbstractBulkByScrollRequestBuilder<?, ?> builder = reindex(coordNodeName).source(INDEX).destination(DEST_INDEX); + AbstractBulkByScrollRequest<?> reindexRequest = builder.request(); + ShutdownPrepareService shutdownPrepareService = internalCluster().getInstance(ShutdownPrepareService.class,
coordNodeName); + + TaskManager taskManager = internalCluster().getInstance(TransportService.class, coordNodeName).getTaskManager(); + + // Now execute the reindex action... + ActionListener<BulkByScrollResponse> reindexListener = new ActionListener<>() { + @Override + public void onResponse(BulkByScrollResponse bulkByScrollResponse) { + assertNull(bulkByScrollResponse.getReasonCancelled()); + logger.debug(bulkByScrollResponse.toString()); + } + + @Override + public void onFailure(Exception e) { + logger.debug("Encountered " + e.toString()); + fail(e, "Encountered " + e.toString()); + } + }; + internalCluster().client(coordNodeName).execute(ReindexAction.INSTANCE, reindexRequest, reindexListener); + + // Check for the reindex task to appear in the tasks list and immediately stop the coordinating node + waitForTask(ReindexAction.INSTANCE.name(), coordNodeName); + shutdownPrepareService.prepareForShutdown(taskManager); + internalCluster().stopNode(coordNodeName); + } + + // Make sure all documents from the source index have been reindexed into the destination index + private void checkDestinationIndex(String dataNodeName, int numDocs) throws Exception { + assertTrue(indexExists(DEST_INDEX)); + flushAndRefresh(DEST_INDEX); + assertBusy(() -> { assertHitCount(prepareSearch(DEST_INDEX).setSize(0).setTrackTotalHits(true), numDocs); }); + } + + private static void waitForTask(String actionName, String nodeName) throws Exception { + assertBusy(() -> { + ListTasksResponse tasks = clusterAdmin().prepareListTasks(nodeName).setActions(actionName).setDetailed(true).get(); + tasks.rethrowFailures("Find my task"); + for (TaskInfo taskInfo : tasks.getTasks()) { + // Return once we find the top-level task; tasks with a parent are children of the task we want + if (taskInfo.parentTaskId().isSet() == false) return; + } + fail("Couldn't find task after waiting, tasks=" + tasks.getTasks()); + }, 10, TimeUnit.SECONDS); + } +} diff --git a/muted-tests.yml b/muted-tests.yml index 971fc161c4632..8b9c3cc6ce712 100644 --- a/muted-tests.yml +++ b/muted-tests.yml @@ -17,18 +17,6 @@ tests: - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT" issue: "https://github.com/elastic/elasticsearch/issues/108628" method: "testDeprecatedSettingsReturnWarnings" -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testStoreDirectory - issue: https://github.com/elastic/elasticsearch/issues/110210 -- class: org.elasticsearch.index.store.FsDirectoryFactoryTests - method: testPreload - issue: https://github.com/elastic/elasticsearch/issues/110211 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testMinVersionAsNewVersion - issue: https://github.com/elastic/elasticsearch/issues/95384 -- class: org.elasticsearch.backwards.SearchWithMinCompatibleSearchNodeIT - method: testCcsMinimizeRoundtripsIsFalse - issue: https://github.com/elastic/elasticsearch/issues/101974 - class: "org.elasticsearch.xpack.searchablesnapshots.FrozenSearchableSnapshotsIntegTests" issue: "https://github.com/elastic/elasticsearch/issues/110408" method: "testCreateAndRestorePartialSearchableSnapshot" @@ -99,9 +87,6 @@ tests: issue: https://github.com/elastic/elasticsearch/issues/112424 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT issue: https://github.com/elastic/elasticsearch/issues/111497 -- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT - method: test {yaml=ingest/80_ingest_simulate/Test ingest simulate with reroute and mapping validation from templates} - issue:
https://github.com/elastic/elasticsearch/issues/112575 - class: org.elasticsearch.xpack.security.authc.kerberos.SimpleKdcLdapServerTests method: testClientServiceMutualAuthentication issue: https://github.com/elastic/elasticsearch/issues/112529 @@ -146,18 +131,12 @@ tests: - class: org.elasticsearch.action.admin.cluster.node.stats.NodeStatsTests method: testChunking issue: https://github.com/elastic/elasticsearch/issues/113139 -- class: org.elasticsearch.xpack.inference.rest.ServerSentEventsRestActionListenerTests - method: testResponse - issue: https://github.com/elastic/elasticsearch/issues/113148 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test30StartStop issue: https://github.com/elastic/elasticsearch/issues/113160 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test33JavaChanged issue: https://github.com/elastic/elasticsearch/issues/113177 -- class: org.elasticsearch.xpack.inference.rest.ServerSentEventsRestActionListenerTests - method: testErrorMidStream - issue: https://github.com/elastic/elasticsearch/issues/113179 - class: org.elasticsearch.xpack.esql.qa.multi_node.EsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113054 @@ -170,9 +149,6 @@ tests: - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test80JavaOptsInEnvVar issue: https://github.com/elastic/elasticsearch/issues/113219 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.AvgTests - method: "testFold {TestCase= #2}" - issue: https://github.com/elastic/elasticsearch/issues/113225 - class: org.elasticsearch.packaging.test.WindowsServiceTests method: test81JavaOptsInJvmOptions issue: https://github.com/elastic/elasticsearch/issues/113313 @@ -212,8 +188,6 @@ tests: - class: org.elasticsearch.xpack.esql.qa.mixed.MixedClusterEsqlSpecIT method: test {categorize.Categorize SYNC} issue: https://github.com/elastic/elasticsearch/issues/113722 -- class: org.elasticsearch.xpack.esql.expression.function.scalar.convert.ToDateNanosTests - issue: https://github.com/elastic/elasticsearch/issues/113661 - class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT method: testNonGzippedDatabase issue: https://github.com/elastic/elasticsearch/issues/113821 @@ -238,36 +212,18 @@ tests: - class: org.elasticsearch.xpack.inference.TextEmbeddingCrudIT method: testPutE5Small_withPlatformSpecificVariant issue: https://github.com/elastic/elasticsearch/issues/113950 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114105 - class: org.elasticsearch.xpack.inference.InferenceCrudIT method: testGet issue: https://github.com/elastic/elasticsearch/issues/114135 -- class: org.elasticsearch.xpack.esql.expression.function.aggregate.AvgTests - method: "testFold {TestCase= #7}" - issue: https://github.com/elastic/elasticsearch/issues/114175 - class: org.elasticsearch.xpack.ilm.ExplainLifecycleIT method: testStepInfoPreservedOnAutoRetry issue: https://github.com/elastic/elasticsearch/issues/114220 -- class: org.elasticsearch.xpack.inference.services.openai.OpenAiServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114232 -- class: org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest_ErrorResponse - issue: https://github.com/elastic/elasticsearch/issues/114327 -- class: 
org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests - method: testInfer_StreamRequest - issue: https://github.com/elastic/elasticsearch/issues/114385 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114412 - class: org.elasticsearch.xpack.inference.InferenceRestIT method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint} issue: https://github.com/elastic/elasticsearch/issues/114376 -- class: org.elasticsearch.search.retriever.RankDocsRetrieverBuilderTests - method: testRewrite - issue: https://github.com/elastic/elasticsearch/issues/114467 - class: org.elasticsearch.packaging.test.DockerTests method: test022InstallPluginsFromLocalArchive issue: https://github.com/elastic/elasticsearch/issues/111063 @@ -276,15 +232,6 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultElserIT method: testInferCreatesDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114503 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym set not found} - issue: https://github.com/elastic/elasticsearch/issues/114432 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Get a synonym rule} - issue: https://github.com/elastic/elasticsearch/issues/114443 -- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT - method: test {p0=synonyms/60_synonym_rule_get/Synonym rule not found} - issue: https://github.com/elastic/elasticsearch/issues/114444 - class: org.elasticsearch.xpack.inference.integration.ModelRegistryIT method: testGetModel issue: https://github.com/elastic/elasticsearch/issues/114657 @@ -308,30 +255,33 @@ tests: - class: org.elasticsearch.xpack.inference.DefaultEndPointsIT method: testInferDeploysDefaultElser issue: https://github.com/elastic/elasticsearch/issues/114913 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testUpgradeMovesRepoToNewMetaVersion - issue: https://github.com/elastic/elasticsearch/issues/114994 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testReadOnlyRepo - issue: https://github.com/elastic/elasticsearch/issues/114997 -- class: org.elasticsearch.upgrades.MultiVersionRepositoryAccessIT - method: testCreateAndRestoreSnapshot - issue: https://github.com/elastic/elasticsearch/issues/114998 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115066 -- class: org.elasticsearch.index.mapper.TextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115073 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromColumnReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115074 -- class: org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapperTests - method: testBlockLoaderFromRowStrideReaderWithSyntheticSource - issue: https://github.com/elastic/elasticsearch/issues/115076 - class: org.elasticsearch.xpack.test.rest.XPackRestIT method: test {p0=esql/60_usage/Basic ESQL usage output (telemetry)} issue: 
https://github.com/elastic/elasticsearch/issues/115231 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115339 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testWatchWithLastCheckedTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115354 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testAddWithLastCheckedTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115356 +- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT + method: testInferDeploysDefaultE5 + issue: https://github.com/elastic/elasticsearch/issues/115361 +- class: org.elasticsearch.xpack.watcher.trigger.schedule.engine.TickerScheduleEngineTests + method: testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval + issue: https://github.com/elastic/elasticsearch/issues/115368 +- class: org.elasticsearch.reservedstate.service.FileSettingsServiceTests + method: testProcessFileChanges + issue: https://github.com/elastic/elasticsearch/issues/115280 +- class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT + method: test {yaml=ingest/80_ingest_simulate/Test mapping addition works with legacy templates} + issue: https://github.com/elastic/elasticsearch/issues/115412 +- class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT + method: testFileSettingsReprocessedOnRestartWithoutVersionChange + issue: https://github.com/elastic/elasticsearch/issues/115450 # Examples: # diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 73f291da15ead..92a704f793dc2 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedFunction; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; @@ -1203,15 +1202,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_INDEXING) // This check can be removed (always assume true) - var originalClusterSupportsReplicationOfClosedIndices = oldClusterHasFeature(RestTestLegacyFeatures.REPLICATION_OF_CLOSED_INDICES); - - if (originalClusterSupportsReplicationOfClosedIndices) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java index 3275f3e0e136f..834d97f755dfb 100644 --- 
a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FileSettingsRoleMappingUpgradeIT.java @@ -28,6 +28,7 @@ import java.util.function.Supplier; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -101,11 +102,17 @@ public void testRoleMappingsAppliedOnUpgrade() throws IOException { // the nodes have all been upgraded. Check they re-processed the role mappings in the settings file on // upgrade Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); - List<Object> roleMappings = new XContentTestUtils.JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( - "metadata.role_mappings.role_mappings" + List<Object> clusterStateRoleMappings = new XContentTestUtils.JsonMapView( + entityAsMap(client().performRequest(clusterStateRequest)) + ).get("metadata.role_mappings.role_mappings"); + assertThat(clusterStateRoleMappings, is(not(nullValue()))); + assertThat(clusterStateRoleMappings.size(), equalTo(1)); + + assertThat( + entityAsMap(client().performRequest(new Request("GET", "/_security/role_mapping"))).keySet(), + // TODO change this to `contains` once the clean-up migration work is merged + hasItem("everyone_kibana-read-only-operator-mapping") ); - assertThat(roleMappings, is(not(nullValue()))); - assertThat(roleMappings.size(), equalTo(1)); } } } diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java index e7cff5cca5a92..a20981a119d8f 100644 --- a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java @@ -55,14 +55,13 @@ public static Iterable<Object[]> parameters() { protected abstract ElasticsearchCluster getUpgradeCluster(); @Before - public void extractOldClusterFeatures() { + public void upgradeNode() throws Exception { + // extract old cluster features if (isOldCluster() && oldClusterTestFeatureService == null) { oldClusterTestFeatureService = testFeatureService; } - } - @Before - public void extractOldIndexVersion() throws Exception { + // extract old index version if (oldIndexVersion == null && upgradedNodes.isEmpty()) { IndexVersion indexVersion = null; // these should all be the same version @@ -93,13 +92,11 @@ public void extractOldIndexVersion() throws Exception { assertThat("Index version could not be read", indexVersion, notNullValue()); oldIndexVersion = indexVersion; } - } - @Before - public void upgradeNode() throws Exception { // Skip remaining tests if upgrade failed assumeFalse("Cluster upgrade failed", upgradeFailed); + // finally, upgrade node if (upgradedNodes.size() < requestedUpgradedNodes) { closeClients(); // we might be running a specific upgrade test by itself - check previous nodes too diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml index d4aa2f1ad4467..4d1a62c6f179e 100644 ---
a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/resources/rest-api-spec/test/ingest/80_ingest_simulate.yml @@ -1229,7 +1229,7 @@ setup: - requires: cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.16" + reason: "ingest simulate mapping addition added in 8.17" - do: headers: @@ -1465,7 +1465,7 @@ setup: - requires: cluster_features: ["simulate.mapping.addition"] - reason: "ingest simulate mapping addition added in 8.16" + reason: "ingest simulate mapping addition added in 8.17" - do: indices.put_template: @@ -1571,3 +1571,143 @@ setup: - match: { docs.0.doc._source.foo: 3 } - match: { docs.0.doc._source.bar: "not a boolean" } - not_exists: docs.0.doc.error + +--- +"Test mapping addition works with indices without templates": + # In this test, we make sure that when we have an index that has a mapping but was not built with a template, the mapping_addition + # is merged in with that mapping. + + - skip: + features: + - headers + - allowed_warnings + + - requires: + cluster_features: ["simulate.support.non.template.mapping"] + reason: "ingest simulate support for indices with mappings that didn't come from templates added in 8.17" + + # First, make sure that validation fails before we create the index (since we are only defining the bar field but trying to index a value + # for foo). + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + + - do: + indices.create: + index: foo-1 + body: + mappings: + dynamic: strict + properties: + foo: + type: integer + - match: { acknowledged: true } + + # Now make sure that the mapping for the newly-created index is getting picked up. Validation fails because it only defined a mapping + # for foo, not for bar.
+ - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - match: { docs.0.doc.error.type: "strict_dynamic_mapping_exception" } + + # Now we make sure that the index's mapping gets merged with the mapping_addition: + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: foo-1 + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ], + "mapping_addition": { + "dynamic": "strict", + "properties": { + "bar": { + "type": "keyword" + } + } + } + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "foo-1" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - not_exists: docs.0.doc.error + + # This last call to simulate is just making sure that if there are no templates, no index mappings, no substitutions, and no mapping + # addition, then validation does not fail + - do: + headers: + Content-Type: application/json + simulate.ingest: + index: nonexistent + body: > + { + "docs": [ + { + "_id": "asdf", + "_source": { + "foo": 3, + "bar": "some text value" + } + } + ] + } + - length: { docs: 1 } + - match: { docs.0.doc._index: "nonexistent" } + - match: { docs.0.doc._source.foo: 3 } + - match: { docs.0.doc._source.bar: "some text value" } + - not_exists: docs.0.doc.error diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 7525ff2dc12d2..4bd293f0a8641 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -59,4 +59,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task -> task.replaceValueInMatch("profile.shards.0.dfs.knn.0.query.0.description", "DocAndScoreQuery[0,...][0.009673266,...],0.009673266", "dfs knn vector profiling with vector_operations_count") task.skipTest("indices.sort/10_basic/Index Sort", "warning does not exist for compatibility") task.skipTest("search/330_fetch_fields/Test search rewrite", "warning does not exist for compatibility") + task.skipTest("indices.create/21_synthetic_source_stored/object param - nested object with stored array", "temporary until backported") }) diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml index eab51427876aa..6a4e92f694220 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.create/21_synthetic_source_stored.yml @@ -319,8 +319,8 @@ object param - nested object array next to other fields: --- object param - nested object with stored array: - requires: - cluster_features: ["mapper.synthetic_source_keep", "mapper.bwc_workaround_9_0"] - reason: requires tracking ignored source + cluster_features: ["mapper.ignored_source.always_store_object_arrays_in_nested", "mapper.bwc_workaround_9_0"] + reason: requires fix to object array handling - do: indices.create: @@ -356,8 +356,11 @@ object param - nested object with stored array: sort: name - match: { hits.total.value: 2 } - match: { hits.hits.0._source.name: A } - - match: { 
hits.hits.0._source.nested_array_regular.0.b.c: [ 10, 100] } - - match: { hits.hits.0._source.nested_array_regular.1.b.c: [ 20, 200] } + # due to a workaround for #115261 + - match: { hits.hits.0._source.nested_array_regular.0.b.0.c: 10 } + - match: { hits.hits.0._source.nested_array_regular.0.b.1.c: 100 } + - match: { hits.hits.0._source.nested_array_regular.1.b.0.c: 20 } + - match: { hits.hits.0._source.nested_array_regular.1.b.1.c: 200 } - match: { hits.hits.1._source.name: B } - match: { hits.hits.1._source.nested_array_stored.0.b.0.c: 10 } - match: { hits.hits.1._source.nested_array_stored.0.b.1.c: 100 } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml index bcd58f3f7bd64..675b98133ce11 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/10_synonyms_put.yml @@ -15,6 +15,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-update-synonyms @@ -58,6 +62,10 @@ setup: - match: { result: "created" } + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.get_synonym: id: test-empty-synonyms diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml index d3d0a3bb4df70..4e77e10495109 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/110_synonyms_invalid.yml @@ -11,6 +11,11 @@ setup: synonyms_set: synonyms: "foo => bar, baz" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: indices.create: index: test_index diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml index 3494f33466ce4..5e6d4ec2341ad 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/20_synonyms_get.yml @@ -14,6 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Get synonyms set": @@ -31,7 +35,6 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" - --- "Get synonyms set - not found": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml index 351ff4e186d8a..23c907f6a1137 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/30_synonyms_delete.yml @@ -12,6 +12,10 @@ setup: - synonyms: "bye => goodbye" id: "test-id-2" + # This is to ensure that all index shards (write and read) are available. 
In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Delete synonyms set": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml index 723c41e163eb8..7c145dafd81cd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/40_synonyms_sets_get.yml @@ -9,6 +9,12 @@ setup: synonyms_set: - synonyms: "hello, hi" - synonyms: "goodbye, bye" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + - do: synonyms.put_synonym: id: test-synonyms-1 @@ -23,21 +29,8 @@ setup: body: synonyms_set: - synonyms: "pc, computer" - # set logging to debug for issue: https://github.com/elastic/elasticsearch/issues/102261 - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: DEBUG --- -teardown: - - do: - cluster.put_settings: - body: - persistent: - logger.org.elasticsearch.synonyms: null ---- "List synonyms set": - do: synonyms.get_synonyms_sets: { } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml index f3711bb0774ca..d8611000fe465 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/50_synonym_rule_put.yml @@ -14,7 +14,10 @@ setup: - synonyms: "test => check" id: "test-id-3" - + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true --- "Update a synonyms rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml index 2a7c8aff89d8e..0c962b51e08cb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/60_synonym_rule_get.yml @@ -13,11 +13,12 @@ setup: id: "test-id-2" - synonyms: "test => check" id: "test-id-3" + + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. - do: cluster.health: - index: .synonyms - timeout: 1m - wait_for_status: green + wait_for_no_initializing_shards: true + --- "Get a synonym rule": diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml index a4853b0b6d414..41ab293158a35 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/70_synonym_rule_delete.yml @@ -14,6 +14,11 @@ setup: - synonyms: "test => check" id: "test-id-3" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. 
+ - do: + cluster.health: + wait_for_no_initializing_shards: true + --- "Delete synonym rule": - do: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml index 89ad933370e1c..3aba0f0b4b78b 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/80_synonyms_from_index.yml @@ -2,7 +2,6 @@ setup: - requires: cluster_features: ["gte_v8.10.0"] reason: Loading synonyms from index is introduced in 8.10.0 - # Create a new synonyms set - do: synonyms.put_synonym: @@ -14,6 +13,11 @@ setup: - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create an index with synonym_filter that uses that synonyms set - do: indices.create: diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml index dc94b36222402..1ceb5b43b8129 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/synonyms/90_synonyms_reloading_for_synset.yml @@ -3,7 +3,6 @@ - requires: cluster_features: ["gte_v8.10.0"] reason: Reloading analyzers for specific synonym set is introduced in 8.10.0 - # Create synonyms_set1 - do: synonyms.put_synonym: @@ -26,6 +25,11 @@ - synonyms: "bye => goodbye" id: "synonym-rule-2" + # This is to ensure that all index shards (write and read) are available. In serverless this can take some time. + - do: + cluster.health: + wait_for_no_initializing_shards: true + # Create my_index1 with synonym_filter that uses synonyms_set1 - do: indices.create: diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 214fc47222f3a..bf81200509691 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -21,21 +21,15 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; -import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.xcontent.XContentType; -import java.util.ArrayList; -import java.util.HashSet; import java.util.List; -import java.util.Set; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; /** * Tests relating to the loss of the master. 
@@ -43,44 +37,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one - */ - public void testMasterNodeGCs() throws Exception { - List nodes = startCluster(3); - // NOTE: this assume must happen after starting the cluster, so that cleanup will have something to cleanup. - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - - String oldMasterNode = internalCluster().getMasterName(); - // a very long GC, but it's OK as we remove the disruption when it has had an effect - SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption(random(), oldMasterNode, 100, 200, 30000, 60000); - internalCluster().setDisruptionScheme(masterNodeDisruption); - masterNodeDisruption.startDisrupting(); - - Set oldNonMasterNodesSet = new HashSet<>(nodes); - oldNonMasterNodesSet.remove(oldMasterNode); - - List oldNonMasterNodes = new ArrayList<>(oldNonMasterNodesSet); - - logger.info("waiting for nodes to de-elect master [{}]", oldMasterNode); - for (String node : oldNonMasterNodesSet) { - assertDifferentMaster(node, oldMasterNode); - } - - logger.info("waiting for nodes to elect a new master"); - ensureStableCluster(2, oldNonMasterNodes.get(0)); - - // restore GC - masterNodeDisruption.stopDisrupting(); - final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()); - ensureStableCluster(3, waitTime, false, oldNonMasterNodes.get(0)); - - // make sure all nodes agree on master - String newMaster = internalCluster().getMasterName(); - assertThat(newMaster, not(equalTo(oldMasterNode))); - assertMaster(newMaster, nodes); - } - /** * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition * and verifies that all node agree on the new cluster state diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 32c602791cca4..48db23635220c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -14,33 +14,26 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.CoordinationDiagnosticsService; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; import org.elasticsearch.cluster.coordination.MasterHistoryService; -import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import 
org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.Tuple; import org.elasticsearch.health.GetHealthAction; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; -import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -50,17 +43,12 @@ import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static java.util.Collections.singleton; @@ -227,266 +215,6 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType ensureStableCluster(3); } - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are - * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. - */ - public void testStaleMasterNotHijackingMajority() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .build() - ); - ensureStableCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and - // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is - // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. - internalCluster().getInstance(ClusterService.class, oldMasterNode) - .submitUnbatchedStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(Exception e) { - logger.warn("failure [sneaky-update]", e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> transitions = entry.getValue(); - assertTrue( - "[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, - transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2())) - ); - } - assertGreenMasterStability(internalCluster().client()); - } - - /** - * This helper method creates a 3-node cluster where all nodes are master-eligible, and then simulates a long GC on the master node 5 - * times (forcing another node to be elected master 5 times). It then asserts that the master stability health indicator status is - * YELLOW, and that expectedMasterStabilitySymptomSubstring is contained in the symptom. 
- * @param expectedMasterStabilitySymptomSubstring A string to expect in the master stability health indicator symptom - * @throws Exception - */ - public void testRepeatedMasterChanges(String expectedMasterStabilitySymptomSubstring) throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - final List nodes = internalCluster().startNodes( - 3, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.IDENTITY_CHANGES_THRESHOLD_SETTING.getKey(), 1) - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 100) - .build() - ); - ensureStableCluster(3); - String firstMaster = internalCluster().getMasterName(); - // Force the master to change 2 times: - for (int i = 0; i < 2; i++) { - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (Objects.equals(previousMaster, currentMaster) == false) { - logger.info( - "--> node {} received new cluster state: {} \n and had previous cluster state: {}", - node, - event.state(), - event.previousState() - ); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for majority side to elect a new master - assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { - final List> transitions = entry.getValue(); - assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); - ensureStableCluster(3); - - assertThat(masters.size(), equalTo(2)); - } - List nodeNamesExceptFirstMaster = Arrays.stream(internalCluster().getNodeNames()) - .filter(name -> name.equals(firstMaster) == false) - .toList(); - /* - * It is possible that the first node that became master got re-elected repeatedly. And since it was in a simulated GC when the - * other node(s) were master, it only saw itself as master. So we want to check with another node. - */ - Client client = internalCluster().client(randomFrom(nodeNamesExceptFirstMaster)); - assertMasterStability(client, HealthStatus.YELLOW, containsString(expectedMasterStabilitySymptomSubstring)); - } - - public void testRepeatedNullMasterRecognizedAsGreenIfMasterDoesNotKnowItIsUnstable() throws Exception { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - /* - * In this test we have a single master-eligible node. We pause it repeatedly (simulating a long GC pause for example) so that - * other nodes decide it is no longer the master. However since there is no other master-eligible node, another node is never - * elected master. And the master node never recognizes that it had a problem. So when we run the master stability check on one - * of the data nodes, it will see that there is a problem (the master has gone null repeatedly), but when it checks with the - * master, the master says everything is fine. So we expect a GREEN status. 
- */ - final List masterNodes = internalCluster().startMasterOnlyNodes( - 1, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), 1) - .build() - ); - int nullTransitionsThreshold = 1; - final List dataNodes = internalCluster().startDataOnlyNodes( - 2, - Settings.builder() - .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") - .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") - .put(CoordinationDiagnosticsService.NO_MASTER_TRANSITIONS_THRESHOLD_SETTING.getKey(), nullTransitionsThreshold) - .put(CoordinationDiagnosticsService.NODE_HAS_MASTER_LOOKUP_TIMEFRAME_SETTING.getKey(), new TimeValue(60, TimeUnit.SECONDS)) - .build() - ); - ensureStableCluster(3); - for (int i = 0; i < nullTransitionsThreshold + 1; i++) { - final String masterNode = masterNodes.get(0); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), masterNode); - - final CountDownLatch dataNodeMasterSteppedDown = new CountDownLatch(2); - internalCluster().getInstance(ClusterService.class, dataNodes.get(0)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().getInstance(ClusterService.class, dataNodes.get(1)).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - dataNodeMasterSteppedDown.countDown(); - } - }); - internalCluster().clearDisruptionScheme(); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", masterNode); - masterNodeDisruption.startDisrupting(); - dataNodeMasterSteppedDown.await(30, TimeUnit.SECONDS); - // Stop disruption - logger.info("--> unfreezing node [{}]", masterNode); - masterNodeDisruption.stopDisrupting(); - ensureStableCluster(3, TimeValue.timeValueSeconds(30), false, randomFrom(dataNodes)); - } - assertGreenMasterStability(internalCluster().client(randomFrom(dataNodes))); - } - public void testNoMasterEligibleNodes() throws Exception { /* * In this test we have a single master-eligible node. We then stop the master. 
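* (Since that is the only master-eligible node, no replacement master can be elected while it is stopped.)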
We set the master lookup threshold very low on the diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java index f813932ebe924..ecd5c5af8649f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -123,7 +123,7 @@ public Settings onNodeStopped(String nodeName) { public void testFailsToStartIfDowngraded() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooNewBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), @@ -133,7 +133,7 @@ public void testFailsToStartIfDowngraded() { public void testFailsToStartIfUpgradedTooFar() { final IllegalStateException illegalStateException = expectThrowsOnRestart( - dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldVersion(), dataPaths) + dataPaths -> PersistedClusterStateService.overrideVersion(NodeMetadataTests.tooOldBuildVersion(), dataPaths) ); assertThat( illegalStateException.getMessage(), diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 7e06004e47cfb..6d9bf2ac52f2d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -174,6 +174,7 @@ static TransportVersion def(int id) { public static final TransportVersion ESQL_PER_AGGREGATE_FILTER = def(8_770_00_0); public static final TransportVersion ML_INFERENCE_ATTACH_TO_EXISTSING_DEPLOYMENT = def(8_771_00_0); public static final TransportVersion CONVERT_FAILURE_STORE_OPTIONS_TO_SELECTOR_OPTIONS_INTERNALLY = def(8_772_00_0); + public static final TransportVersion INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16 = def(8_772_00_1); public static final TransportVersion REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_773_00_0); public static final TransportVersion REVERT_REMOVE_MIN_COMPATIBLE_SHARD_NODE = def(8_774_00_0); public static final TransportVersion ESQL_FIELD_ATTRIBUTE_PARENT_SIMPLIFIED = def(8_775_00_0); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index b855f2cee7613..9ffef1f178f44 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; @@ -85,7 +84,7 @@ protected void masterOperation( String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); Map> routingMap = 
indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices()); Map indicesAndFilters = new HashMap<>(); - Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); for (String index : concreteIndices) { final AliasFilter aliasFilter = indicesService.buildAliasFilter(clusterState, index, indicesAndAliases); final String[] aliases = indexNameExpressionResolver.indexAliases( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index 080ebb5951a7a..553f784d23a87 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -35,6 +35,7 @@ public class CreateIndexClusterStateUpdateRequest { private ResizeType resizeType; private boolean copySettings; private SystemDataStreamDescriptor systemDataStreamDescriptor; + private boolean isFailureIndex = false; private Settings settings = Settings.EMPTY; @@ -102,6 +103,11 @@ public CreateIndexClusterStateUpdateRequest systemDataStreamDescriptor(SystemDat return this; } + public CreateIndexClusterStateUpdateRequest isFailureIndex(boolean isFailureIndex) { + this.isFailureIndex = isFailureIndex; + return this; + } + public String cause() { return cause; } @@ -168,6 +174,10 @@ public String dataStreamName() { return dataStreamName; } + public boolean isFailureIndex() { + return isFailureIndex; + } + public CreateIndexClusterStateUpdateRequest dataStreamName(String dataStreamName) { this.dataStreamName = dataStreamName; return this; @@ -228,6 +238,8 @@ public String toString() { + systemDataStreamDescriptor + ", matchingTemplate=" + matchingTemplate + + ", isFailureIndex=" + + isFailureIndex + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index f5c100b7884bb..5c5c71bc002b3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -566,8 +565,8 @@ static void resolveIndices( if (names.length == 1 && (Metadata.ALL.equals(names[0]) || Regex.isMatchAllPattern(names[0]))) { names = new String[] { "**" }; } - Set resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); - for (ResolvedExpression s : resolvedIndexAbstractions) { + Set resolvedIndexAbstractions = resolver.resolveExpressions(clusterState, indicesOptions, true, names); + for (String s : resolvedIndexAbstractions) { enrichIndexAbstraction(clusterState, s, indices, aliases, 
dataStreams); } indices.sort(Comparator.comparing(ResolvedIndexAbstraction::getName)); @@ -598,12 +597,12 @@ private static void mergeResults( private static void enrichIndexAbstraction( ClusterState clusterState, - ResolvedExpression indexAbstraction, + String indexAbstraction, List indices, List aliases, List dataStreams ) { - IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction.resource()); + IndexAbstraction ia = clusterState.metadata().getIndicesLookup().get(indexAbstraction); if (ia != null) { switch (ia.getType()) { case CONCRETE_INDEX -> { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 5e3799cd14518..94d9b87467ea8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -48,6 +48,7 @@ import java.time.Instant; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -270,6 +271,7 @@ public static Template resolveTemplate( // First apply settings sourced from index settings providers final var now = Instant.now(); Settings.Builder additionalSettings = Settings.builder(); + Set overrulingSettings = new HashSet<>(); for (var provider : indexSettingProviders) { Settings result = provider.getAdditionalIndexSettings( indexName, @@ -283,8 +285,21 @@ public static Template resolveTemplate( MetadataCreateIndexService.validateAdditionalSettings(provider, result, additionalSettings); dummySettings.put(result); additionalSettings.put(result); + if (provider.overrulesTemplateAndRequestSettings()) { + overrulingSettings.addAll(result.keySet()); + } } - // Then apply settings resolved from templates: + + if (overrulingSettings.isEmpty() == false) { + // Filter any conflicting settings from overruling providers, to avoid overwriting their values from templates. + final Settings.Builder filtered = Settings.builder().put(templateSettings); + for (String setting : overrulingSettings) { + filtered.remove(setting); + } + templateSettings = filtered.build(); + } + + // Apply settings resolved from templates. 
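+ // (Keys claimed by an overruling provider were already filtered out of templateSettings above, so this put cannot overwrite their values.)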
dummySettings.put(templateSettings); final IndexMetadata indexMetadata = IndexMetadata.builder(indexName) diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index e01f364712676..4e9830fe0d14e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.routing.ShardRouting; @@ -134,7 +133,7 @@ protected void doExecute(Task task, ValidateQueryRequest request, ActionListener @Override protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) { final ClusterState clusterState = clusterService.state(); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, request.indices()); final AliasFilter aliasFilter = searchService.buildAliasFilter(clusterState, shard.getIndexName(), indicesAndAliases); return new ShardValidateQueryRequest(shard.shardId(), aliasFilter, request); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java index 22cf8a2260d87..62a9b88cb6a57 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkFeatures.java @@ -19,6 +19,7 @@ import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_ADDITION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION; import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_MAPPING_VALIDATION_TEMPLATES; +import static org.elasticsearch.action.bulk.TransportSimulateBulkAction.SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING; public class BulkFeatures implements FeatureSpecification { public Set getFeatures() { @@ -27,7 +28,8 @@ public Set getFeatures() { SIMULATE_MAPPING_VALIDATION_TEMPLATES, SIMULATE_COMPONENT_TEMPLATE_SUBSTITUTIONS, SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS, - SIMULATE_MAPPING_ADDITION + SIMULATE_MAPPING_ADDITION, + SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING ); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java index 007f274d7f493..130d6286f7e02 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkOperation.java @@ -320,6 +320,12 @@ private Map> groupRequestsByShards( shard -> new ArrayList<>() ); shardRequests.add(bulkItemRequest); + } catch (DataStream.TimestampError timestampError) { + IndexDocFailureStoreStatus failureStoreStatus = processFailure(bulkItemRequest, 
clusterState, timestampError); + if (IndexDocFailureStoreStatus.USED.equals(failureStoreStatus) == false) { + String name = ia != null ? ia.getName() : docWriteRequest.index(); + addFailureAndDiscardRequest(docWriteRequest, bulkItemRequest.id(), name, timestampError, failureStoreStatus); + } } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException | ResourceNotFoundException e) { String name = ia != null ? ia.getName() : docWriteRequest.index(); var failureStoreStatus = isFailureStoreRequest(docWriteRequest) @@ -545,6 +551,7 @@ private IndexDocFailureStoreStatus processFailure(BulkItemRequest bulkItemReques boolean added = addDocumentToRedirectRequests(bulkItemRequest, cause, failureStoreCandidate.getName()); if (added) { failureStoreMetrics.incrementFailureStore(bulkItemRequest.index(), errorType, FailureStoreMetrics.ErrorLocation.SHARD); + return IndexDocFailureStoreStatus.USED; } else { failureStoreMetrics.incrementRejected( bulkItemRequest.index(), diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java index 0888b70f5399c..1353fa78595ef 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSimulateBulkAction.java @@ -84,6 +84,7 @@ public class TransportSimulateBulkAction extends TransportAbstractBulkAction { ); public static final NodeFeature SIMULATE_INDEX_TEMPLATE_SUBSTITUTIONS = new NodeFeature("simulate.index.template.substitutions"); public static final NodeFeature SIMULATE_MAPPING_ADDITION = new NodeFeature("simulate.mapping.addition"); + public static final NodeFeature SIMULATE_SUPPORT_NON_TEMPLATE_MAPPING = new NodeFeature("simulate.support.non.template.mapping"); private final IndicesService indicesService; private final NamedXContentRegistry xContentRegistry; private final Set indexSettingProviders; @@ -258,6 +259,10 @@ private Exception validateMappings( String matchingTemplate = findV2Template(simulatedState.metadata(), request.index(), false); if (matchingTemplate != null) { + /* + * The index matches a v2 template (including possibly one or more of the substitutions passed in). So we use this + * template, and then possibly apply the mapping addition if it is not null, and validate. + */ final Template template = TransportSimulateIndexTemplateAction.resolveTemplate( matchingTemplate, request.index(), @@ -273,13 +278,36 @@ private Exception validateMappings( validateUpdatedMappings(mappings, mergedMappings, request, sourceToParse); } else { List matchingTemplates = findV1Templates(simulatedState.metadata(), request.index(), false); - final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( - "{}", - matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), - xContentRegistry - ); - final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition); - validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + if (matchingTemplates.isEmpty() == false) { + /* + * The index matches one or more v1 templates. These are not compatible with component_template_substitutions or + * index_template_substitutions, but we can apply a mapping_addition. 
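+ * (Substitutions only swap v2 component and index templates into the simulated cluster state, which is why they have no effect on this v1 path.)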
+ */ + final Map mappingsMap = MetadataCreateIndexService.parseV1Mappings( + "{}", + matchingTemplates.stream().map(IndexTemplateMetadata::getMappings).collect(toList()), + xContentRegistry + ); + final CompressedXContent combinedMappings = mergeMappings(new CompressedXContent(mappingsMap), mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } else if (indexAbstraction != null && mappingAddition.isEmpty() == false) { + /* + * The index matched no templates of any kind, including the substitutions. But it might have a mapping. So we + * merge in the mapping addition if it exists, and validate. + */ + MappingMetadata mappingFromIndex = clusterService.state().metadata().index(indexAbstraction.getName()).mapping(); + CompressedXContent currentIndexCompressedXContent = mappingFromIndex == null ? null : mappingFromIndex.source(); + CompressedXContent combinedMappings = mergeMappings(currentIndexCompressedXContent, mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } else { + /* + * The index matched no templates and had no mapping of its own. If there were component template substitutions + * or index template substitutions, they didn't match anything. So just apply the mapping addition if it exists, + * and validate. + */ + final CompressedXContent combinedMappings = mergeMappings(null, mappingAddition); + validateUpdatedMappings(null, combinedMappings, request, sourceToParse); + } } } } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index 84c6df7b8a66f..9c82d032014f2 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.support.single.shard.TransportSingleShardAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; @@ -110,7 +109,7 @@ protected boolean resolveIndex(ExplainRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(state, request.request().index()); final AliasFilter aliasFilter = searchService.buildAliasFilter(state, request.concreteIndex(), indicesAndAliases); request.request().filteringAlias(aliasFilter); } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index b5864f64a7824..1645a378446a4 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -37,7 +37,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.GroupShardsIterator; @@ -111,7 +110,6 @@ import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; -import java.util.stream.Collectors; import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.elasticsearch.action.search.SearchType.QUERY_THEN_FETCH; @@ -205,7 +203,7 @@ public TransportSearchAction( private Map buildPerIndexOriginalIndices( ClusterState clusterState, - Set indicesAndAliases, + Set indicesAndAliases, String[] indices, IndicesOptions indicesOptions ) { @@ -213,9 +211,6 @@ private Map buildPerIndexOriginalIndices( var blocks = clusterState.blocks(); // optimization: mostly we do not have any blocks so there's no point in the expensive per-index checking boolean hasBlocks = blocks.global().isEmpty() == false || blocks.indices().isEmpty() == false; - // Get a distinct set of index abstraction names present from the resolved expressions to help with the reverse resolution from - // concrete index to the expression that produced it. - Set indicesAndAliasesResources = indicesAndAliases.stream().map(ResolvedExpression::resource).collect(Collectors.toSet()); for (String index : indices) { if (hasBlocks) { blocks.indexBlockedRaiseException(ClusterBlockLevel.READ, index); @@ -232,8 +227,8 @@ private Map buildPerIndexOriginalIndices( String[] finalIndices = Strings.EMPTY_ARRAY; if (aliases == null || aliases.length == 0 - || indicesAndAliasesResources.contains(index) - || hasDataStreamRef(clusterState, indicesAndAliasesResources, index)) { + || indicesAndAliases.contains(index) + || hasDataStreamRef(clusterState, indicesAndAliases, index)) { finalIndices = new String[] { index }; } if (aliases != null) { @@ -252,11 +247,7 @@ private static boolean hasDataStreamRef(ClusterState clusterState, Set i return indicesAndAliases.contains(ret.getParentDataStream().getName()); } - Map buildIndexAliasFilters( - ClusterState clusterState, - Set indicesAndAliases, - Index[] concreteIndices - ) { + Map buildIndexAliasFilters(ClusterState clusterState, Set indicesAndAliases, Index[] concreteIndices) { final Map aliasFilterMap = new HashMap<>(); for (Index index : concreteIndices) { clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index.getName()); @@ -1246,10 +1237,7 @@ private void executeSearch( } else { final Index[] indices = resolvedIndices.getConcreteLocalIndices(); concreteLocalIndices = Arrays.stream(indices).map(Index::getName).toArray(String[]::new); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions( - clusterState, - searchRequest.indices() - ); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); aliasFilter = buildIndexAliasFilters(clusterState, indicesAndAliases, indices); aliasFilter.putAll(remoteAliasMap); localShardIterators = getLocalShardsIterator( @@ -1822,7 +1810,7 @@ List getLocalShardsIterator( ClusterState clusterState, SearchRequest searchRequest, String clusterAlias, - Set indicesAndAliases, + Set indicesAndAliases, String[] concreteIndices ) { var routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices()); diff --git 
a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java index b94bd95c93d8a..f418b5617b2a1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchShardsAction.java @@ -17,7 +17,6 @@ import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.index.Index; @@ -128,10 +127,7 @@ public void searchShards(Task task, SearchShardsRequest searchShardsRequest, Act searchService.getRewriteContext(timeProvider::absoluteStartMillis, resolvedIndices, null), listener.delegateFailureAndWrap((delegate, searchRequest) -> { Index[] concreteIndices = resolvedIndices.getConcreteLocalIndices(); - final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions( - clusterState, - searchRequest.indices() - ); + final Set indicesAndAliases = indexNameExpressionResolver.resolveExpressions(clusterState, searchRequest.indices()); final Map aliasFilters = transportSearchAction.buildIndexAliasFilters( clusterState, indicesAndAliases, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index 6d1a874e1c72b..ae7cff6312155 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -28,6 +28,8 @@ import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -189,9 +191,14 @@ public List getRequiredComponentTemplates() { if (ignoreMissingComponentTemplates == null) { return componentTemplates; } - return componentTemplates.stream() - .filter(componentTemplate -> ignoreMissingComponentTemplates.contains(componentTemplate) == false) - .toList(); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + List required = new ArrayList<>(componentTemplates.size()); + for (String template : componentTemplates) { + if (ignoreMissingComponentTemplates.contains(template) == false) { + required.add(template); + } + } + return Collections.unmodifiableList(required); } @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index bedf65e1a9c8b..4dcc7c73c280e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -1343,7 +1343,7 @@ public Index getWriteIndex(IndexRequest request, Metadata metadata) { + "]" ) .collect(Collectors.joining()); - throw new IllegalArgumentException( + throw new TimestampError( "the document timestamp [" + timestampAsString + "] is outside of ranges of currently writable indices [" @@ -1405,10 +1405,10 @@ private static Instant 
getTimeStampFromRaw(Object rawTimestamp) { } else if (rawTimestamp instanceof String sTimestamp) { return DateFormatters.from(TIMESTAMP_FORMATTER.parse(sTimestamp), TIMESTAMP_FORMATTER.locale()).toInstant(); } else { - throw new IllegalArgumentException("timestamp [" + rawTimestamp + "] type [" + rawTimestamp.getClass() + "] error"); + throw new TimestampError("timestamp [" + rawTimestamp + "] type [" + rawTimestamp.getClass() + "] error"); } } catch (Exception e) { - throw new IllegalArgumentException("Error get data stream timestamp field: " + e.getMessage(), e); + throw new TimestampError("Error get data stream timestamp field: " + e.getMessage(), e); } } @@ -1432,7 +1432,7 @@ private static Instant getTimestampFromParser(BytesReference source, XContentTyp ); }; } catch (Exception e) { - throw new IllegalArgumentException("Error extracting data stream timestamp field: " + e.getMessage(), e); + throw new TimestampError("Error extracting data stream timestamp field: " + e.getMessage(), e); } } @@ -1741,4 +1741,20 @@ public DataStream build() { ); } } + + /** + * This is a specialised error to capture that a document does not have a valid timestamp + * for indexing. It is mainly applicable to TSDS data streams because they need the timestamp + * to determine the write index. + */ + public static class TimestampError extends IllegalArgumentException { + + public TimestampError(String message, Exception cause) { + super(message, cause); + } + + public TimestampError(String message) { + super(message); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java index fd3fc1a732acb..7315e9f7a51d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinition.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -19,6 +20,8 @@ import org.elasticsearch.index.mapper.RoutingFieldMapper; import java.io.IOException; +import java.util.HashSet; +import java.util.Set; /** * A utility class that contains the mappings and settings logic for failure store indices that are a part of data streams. */ public class DataStreamFailureStoreDefinition { public static final String FAILURE_STORE_REFRESH_INTERVAL_SETTING_NAME = "data_streams.failure_store.refresh_interval"; + public static final String INDEX_FAILURE_STORE_VERSION_SETTING_NAME = "index.failure_store.version"; public static final Settings DATA_STREAM_FAILURE_STORE_SETTINGS; + // Only a subset of user-configurable settings is applicable for a failure index. Here we have an + // allowlist that will filter all other settings out. 
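+ // (For example, index.default_pipeline and index.final_pipeline are deliberately absent from the allowlist, so pipeline settings never carry over to a failure index.)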
+ public static final Set<String> SUPPORTED_USER_SETTINGS = Set.of( + DataTier.TIER_PREFERENCE, + IndexMetadata.SETTING_INDEX_HIDDEN, + INDEX_FAILURE_STORE_VERSION_SETTING_NAME, + IndexMetadata.SETTING_NUMBER_OF_SHARDS, + IndexMetadata.SETTING_NUMBER_OF_REPLICAS, + IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, + IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), + IndexMetadata.LIFECYCLE_NAME + ); + public static final Set<String> SUPPORTED_USER_SETTINGS_PREFIXES = Set.of( + IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ".", + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + ".", + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." + ); public static final CompressedXContent DATA_STREAM_FAILURE_STORE_MAPPING; public static final int FAILURE_STORE_DEFINITION_VERSION = 1; public static final Setting<Integer> FAILURE_STORE_DEFINITION_VERSION_SETTING = Setting.intSetting( - "index.failure_store.version", + INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 0, Setting.Property.IndexScope ); @@ -40,11 +61,6 @@ public class DataStreamFailureStoreDefinition { DATA_STREAM_FAILURE_STORE_SETTINGS = Settings.builder() // Always start with the hidden settings for a backing index. .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) - // Override any pipeline settings on the failure store to not use any - // specified by the data stream template. Default pipelines are very much - // meant for the backing indices only. - .putNull(IndexSettings.DEFAULT_PIPELINE.getKey()) - .putNull(IndexSettings.FINAL_PIPELINE.getKey()) .put(FAILURE_STORE_DEFINITION_VERSION_SETTING.getKey(), FAILURE_STORE_DEFINITION_VERSION) .build(); @@ -199,4 +215,23 @@ public static Settings.Builder applyFailureStoreSettings(Settings nodeSettings, } return builder; } + + /** + * Removes any settings that the failure store does not support from the provided settings. + * ATTENTION: This method should be applied BEFORE we set the necessary settings for an index. + * @param builder the settings builder that is going to be updated + * @return the original settings builder, with the unsupported settings removed. + */ + public static Settings.Builder filterUserDefinedSettings(Settings.Builder builder) { + if (builder.keys().isEmpty() == false) { + Set<String> existingKeys = new HashSet<>(builder.keys()); + for (String setting : existingKeys) { + if (SUPPORTED_USER_SETTINGS.contains(setting) == false + && SUPPORTED_USER_SETTINGS_PREFIXES.stream().anyMatch(setting::startsWith) == false) { + builder.remove(setting); + } + } + } + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index eaf54034b22e0..2229166a2d779 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -74,15 +74,6 @@ public IndexNameExpressionResolver(ThreadContext threadContext, SystemIndices sy this.systemIndices = Objects.requireNonNull(systemIndices, "System Indices must not be null"); } - /** - * This contains the resolved expression in the form of the resource. - * Soon it will facilitate the index component selector. - * @param resource the resolved resolvedExpression - */ - public record ResolvedExpression(String resource) { - - } - /** * Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options * are encapsulated in the specified request.
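The allowlist above is enforced by `filterUserDefinedSettings`, shown in the same hunk. A minimal usage sketch, spelling out the literal keys behind the constants it references (anything not allowlisted is dropped):

```java
import org.elasticsearch.cluster.metadata.DataStreamFailureStoreDefinition;
import org.elasticsearch.common.settings.Settings;

public class FilterUserSettingsDemo {
    public static void main(String[] args) {
        Settings.Builder builder = Settings.builder()
            .put("index.number_of_shards", 1)                   // allowlisted key
            .put("index.routing.allocation.include.zone", "eu") // allowlisted prefix
            .put("index.default_pipeline", "my-pipeline");      // not allowlisted
        DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder);
        // Only the supported keys survive; the pipeline setting has been removed.
        System.out.println(builder.keys());
    }
}
```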
@@ -200,9 +191,8 @@ public List dataStreamNames(ClusterState state, IndicesOptions options, getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressions(context, indexExpressions); return expressions.stream() - .map(ResolvedExpression::resource) .map(x -> state.metadata().getIndicesLookup().get(x)) .filter(Objects::nonNull) .filter(ia -> ia.getType() == Type.DATA_STREAM) @@ -231,11 +221,10 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit getNetNewSystemIndexPredicate() ); - final Collection expressions = resolveExpressions(context, request.index()); + final Collection expressions = resolveExpressions(context, request.index()); if (expressions.size() == 1) { - ResolvedExpression resolvedExpression = expressions.iterator().next(); - IndexAbstraction ia = state.metadata().getIndicesLookup().get(resolvedExpression.resource()); + IndexAbstraction ia = state.metadata().getIndicesLookup().get(expressions.iterator().next()); if (ia.getType() == Type.ALIAS) { Index writeIndex = ia.getWriteIndex(); if (writeIndex == null) { @@ -257,14 +246,14 @@ public IndexAbstraction resolveWriteIndexAbstraction(ClusterState state, DocWrit } } - protected static Collection resolveExpressions(Context context, String... expressions) { + protected static Collection resolveExpressions(Context context, String... expressions) { if (context.getOptions().expandWildcardExpressions() == false) { if (expressions == null || expressions.length == 0 || expressions.length == 1 && Metadata.ALL.equals(expressions[0])) { return List.of(); } else { return ExplicitResourceNameFilter.filterUnavailable( context, - DateMathExpressionResolver.resolve(context, Arrays.stream(expressions).map(ResolvedExpression::new).toList()) + DateMathExpressionResolver.resolve(context, List.of(expressions)) ); } } else { @@ -275,10 +264,7 @@ protected static Collection resolveExpressions(Context conte } else { return WildcardExpressionResolver.resolve( context, - ExplicitResourceNameFilter.filterUnavailable( - context, - DateMathExpressionResolver.resolve(context, Arrays.stream(expressions).map(ResolvedExpression::new).toList()) - ) + ExplicitResourceNameFilter.filterUnavailable(context, DateMathExpressionResolver.resolve(context, List.of(expressions))) ); } } @@ -353,12 +339,12 @@ String[] concreteIndexNames(Context context, String... indexExpressions) { } Index[] concreteIndices(Context context, String... indexExpressions) { - final Collection expressions = resolveExpressions(context, indexExpressions); + final Collection expressions = resolveExpressions(context, indexExpressions); final Set concreteIndicesResult = Sets.newLinkedHashSetWithExpectedSize(expressions.size()); final Map indicesLookup = context.getState().metadata().getIndicesLookup(); - for (ResolvedExpression resolvedExpression : expressions) { - final IndexAbstraction indexAbstraction = indicesLookup.get(resolvedExpression.resource()); + for (String expression : expressions) { + final IndexAbstraction indexAbstraction = indicesLookup.get(expression); assert indexAbstraction != null; if (indexAbstraction.getType() == Type.ALIAS && context.isResolveToWriteIndex()) { Index writeIndex = indexAbstraction.getWriteIndex(); @@ -392,7 +378,7 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { throw new IllegalArgumentException( indexAbstraction.getType().getDisplayName() + " [" - + resolvedExpression.resource() + + expression + "] has more than one index associated with it " + Arrays.toString(indexNames) + ", can't execute a single index op" @@ -656,7 +642,7 @@ public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { * Utility method that allows to resolve an index expression to its corresponding single write index. * * @param state the cluster state containing all the data to resolve the expression to a concrete index - * @param request The request that defines how an alias or an index need to be resolved to a concrete index + * @param request The request that defines how an alias or an index needs to be resolved to a concrete index * and the expression that can be resolved to an alias or an index name. * @throws IllegalArgumentException if the index resolution does not lead to an index, or leads to more than one index * @return the write index obtained as a result of the index resolution @@ -748,7 +734,7 @@ public static String resolveDateMathExpression(String dateExpression, long time) /** * Resolve an array of expressions to the set of indices and aliases that these expressions match. */ - public Set<ResolvedExpression> resolveExpressions(ClusterState state, String... expressions) { + public Set<String> resolveExpressions(ClusterState state, String... expressions) { return resolveExpressions(state, IndicesOptions.lenientExpandOpen(), false, expressions); } @@ -757,7 +743,7 @@ public Set resolveExpressions(ClusterState state, String... * If {@param preserveDataStreams} is {@code true}, datastreams that are covered by the wildcards from the * {@param expressions} are returned as-is, without expanding them further to their respective backing indices. */ - public Set<ResolvedExpression> resolveExpressions( + public Set<String> resolveExpressions( ClusterState state, IndicesOptions indicesOptions, boolean preserveDataStreams, @@ -774,10 +760,10 @@ public Set resolveExpressions( getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - Collection<ResolvedExpression> resolved = resolveExpressions(context, expressions); - if (resolved instanceof Set<ResolvedExpression>) { + Collection<String> resolved = resolveExpressions(context, expressions); + if (resolved instanceof Set<String>) { // unmodifiable without creating a new collection as it might contain many items - return Collections.unmodifiableSet((Set<ResolvedExpression>) resolved); + return Collections.unmodifiableSet((Set<String>) resolved); } else { return Set.copyOf(resolved); } @@ -790,7 +776,7 @@ public Set resolveExpressions( * the index itself - null is returned. Returns {@code null} if no filtering is required. * NOTE: The provided expressions must have been resolved already via {@link #resolveExpressions}.
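With the wrapper reverted, resolution hands back plain names, so the resolve-then-filter pairing stays string-based end to end. A compile-level sketch of the restored call shape (the `logs-*` expression is illustrative):

```java
import java.util.Set;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

class ResolveThenFilterDemo {
    // Resolve the expressions once, then reuse the resolved names when computing alias filters.
    static String[] aliasFiltersFor(IndexNameExpressionResolver resolver, ClusterState state, String concreteIndex) {
        Set<String> resolved = resolver.resolveExpressions(state, "logs-*"); // illustrative expression
        return resolver.filteringAliases(state, concreteIndex, resolved);
    }
}
```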
*/ - public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { + public String[] filteringAliases(ClusterState state, String index, Set resolvedExpressions) { return indexAliases(state, index, AliasMetadata::filteringRequired, DataStreamAlias::filteringRequired, false, resolvedExpressions); } @@ -816,39 +802,39 @@ public String[] indexAliases( Predicate requiredAlias, Predicate requiredDataStreamAlias, boolean skipIdentity, - Set resolvedExpressions + Set resolvedExpressions ) { - if (isAllIndicesExpression(resolvedExpressions)) { + if (isAllIndices(resolvedExpressions)) { return null; } - Set resources = resolvedExpressions.stream().map(ResolvedExpression::resource).collect(Collectors.toSet()); + final IndexMetadata indexMetadata = state.metadata().getIndices().get(index); if (indexMetadata == null) { // Shouldn't happen throw new IndexNotFoundException(index); } - if (skipIdentity == false && resources.contains(index)) { + if (skipIdentity == false && resolvedExpressions.contains(index)) { return null; } IndexAbstraction ia = state.metadata().getIndicesLookup().get(index); DataStream dataStream = ia.getParentDataStream(); if (dataStream != null) { - if (skipIdentity == false && resources.contains(dataStream.getName())) { + if (skipIdentity == false && resolvedExpressions.contains(dataStream.getName())) { // skip the filters when the request targets the data stream name return null; } Map dataStreamAliases = state.metadata().dataStreamAliases(); List aliasesForDataStream; - if (iterateIndexAliases(dataStreamAliases.size(), resources.size())) { + if (iterateIndexAliases(dataStreamAliases.size(), resolvedExpressions.size())) { aliasesForDataStream = dataStreamAliases.values() .stream() - .filter(dataStreamAlias -> resources.contains(dataStreamAlias.getName())) + .filter(dataStreamAlias -> resolvedExpressions.contains(dataStreamAlias.getName())) .filter(dataStreamAlias -> dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); } else { - aliasesForDataStream = resources.stream() + aliasesForDataStream = resolvedExpressions.stream() .map(dataStreamAliases::get) .filter(dataStreamAlias -> dataStreamAlias != null && dataStreamAlias.getDataStreams().contains(dataStream.getName())) .toList(); @@ -873,15 +859,18 @@ public String[] indexAliases( } else { final Map indexAliases = indexMetadata.getAliases(); final AliasMetadata[] aliasCandidates; - if (iterateIndexAliases(indexAliases.size(), resources.size())) { + if (iterateIndexAliases(indexAliases.size(), resolvedExpressions.size())) { // faster to iterate indexAliases aliasCandidates = indexAliases.values() .stream() - .filter(aliasMetadata -> resources.contains(aliasMetadata.alias())) + .filter(aliasMetadata -> resolvedExpressions.contains(aliasMetadata.alias())) .toArray(AliasMetadata[]::new); } else { // faster to iterate resolvedExpressions - aliasCandidates = resources.stream().map(indexAliases::get).filter(Objects::nonNull).toArray(AliasMetadata[]::new); + aliasCandidates = resolvedExpressions.stream() + .map(indexAliases::get) + .filter(Objects::nonNull) + .toArray(AliasMetadata[]::new); } List aliases = null; for (AliasMetadata aliasMetadata : aliasCandidates) { @@ -920,7 +909,12 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab getSystemIndexAccessPredicate(), getNetNewSystemIndexPredicate() ); - final Collection resolvedExpressions = resolveExpressions(context, expressions); + final Collection resolvedExpressions = resolveExpressions(context, expressions); + + // 
TODO: it appears that this can never be true? + if (isAllIndices(resolvedExpressions)) { + return resolveSearchRoutingAllIndices(state.metadata(), routing); + } Map> routings = null; Set paramRouting = null; @@ -930,8 +924,8 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing)); } - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(resolvedExpression.resource); + for (String expression : resolvedExpressions) { + IndexAbstraction indexAbstraction = state.metadata().getIndicesLookup().get(expression); if (indexAbstraction != null && indexAbstraction.getType() == Type.ALIAS) { for (Index index : indexAbstraction.getIndices()) { String concreteIndex = index.getName(); @@ -969,7 +963,7 @@ public Map> resolveSearchRouting(ClusterState state, @Nullab } } else { // Index - routings = collectRoutings(routings, paramRouting, norouting, resolvedExpression.resource()); + routings = collectRoutings(routings, paramRouting, norouting, expression); } } @@ -1015,17 +1009,6 @@ public static Map> resolveSearchRoutingAllIndices(Metadata m return null; } - /** - * Identifies whether the array containing index names given as argument refers to all indices - * The empty or null array identifies all indices - * - * @param aliasesOrIndices the array containing index names - * @return true if the provided array maps to all indices, false otherwise - */ - public static boolean isAllIndicesExpression(Collection aliasesOrIndices) { - return isAllIndices(aliasesOrIndices.stream().map(ResolvedExpression::resource).toList()); - } - /** * Identifies whether the array containing index names given as argument refers to all indices * The empty or null array identifies all indices @@ -1266,8 +1249,8 @@ private WildcardExpressionResolver() { * Returns all the indices, datastreams, and aliases, considering the open/closed, system, and hidden context parameters. * Depending on the context, returns the names of the datastreams themselves or their backing indices. 
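On the `resolveSearchRouting` hunk above: when a request supplies `?routing=` values and an expression resolves to an alias with its own search routing, only values present on both sides select shards. A standalone illustration of that intersection logic, not the actual method:

```java
import java.util.HashSet;
import java.util.Set;

class RoutingIntersectionDemo {
    // Only routing values configured on the alias AND passed on the request are kept.
    static Set<String> effectiveRouting(Set<String> aliasRouting, Set<String> paramRouting) {
        if (paramRouting == null) {
            return aliasRouting; // no request-level routing: use the alias routing as-is
        }
        Set<String> intersection = new HashSet<>(paramRouting);
        intersection.retainAll(aliasRouting);
        return intersection;
    }

    public static void main(String[] args) {
        System.out.println(effectiveRouting(Set.of("1", "2"), Set.of("2", "3"))); // prints [2]
    }
}
```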
*/ - public static Collection resolveAll(Context context) { - List concreteIndices = resolveEmptyOrTrivialWildcard(context); + public static Collection resolveAll(Context context) { + List concreteIndices = resolveEmptyOrTrivialWildcard(context); if (context.includeDataStreams() == false && context.getOptions().ignoreAliases()) { return concreteIndices; @@ -1282,7 +1265,7 @@ public static Collection resolveAll(Context context) { .filter(ia -> shouldIncludeIfDataStream(ia, context) || shouldIncludeIfAlias(ia, context)) .filter(ia -> ia.isSystem() == false || context.systemIndexAccessPredicate.test(ia.getName())); - Set resolved = expandToOpenClosed(context, ias).collect(Collectors.toSet()); + Set resolved = expandToOpenClosed(context, ias).collect(Collectors.toSet()); resolved.addAll(concreteIndices); return resolved; } @@ -1310,17 +1293,17 @@ private static boolean shouldIncludeIfAlias(IndexAbstraction ia, IndexNameExpres * ultimately returned, instead of the alias or datastream name * */ - public static Collection resolve(Context context, List expressions) { + public static Collection resolve(Context context, List expressions) { ExpressionList expressionList = new ExpressionList(context, expressions); // fast exit if there are no wildcards to evaluate if (expressionList.hasWildcard() == false) { return expressions; } - Set result = new HashSet<>(); + Set result = new HashSet<>(); for (ExpressionList.Expression expression : expressionList) { if (expression.isWildcard()) { Stream matchingResources = matchResourcesToWildcard(context, expression.get()); - Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); + Stream matchingOpenClosedNames = expandToOpenClosed(context, matchingResources); AtomicBoolean emptyWildcardExpansion = new AtomicBoolean(false); if (context.getOptions().allowNoIndices() == false) { emptyWildcardExpansion.set(true); @@ -1336,9 +1319,9 @@ public static Collection resolve(Context context, List filterIndicesLookupForSuffixWildcar * Data streams and aliases are interpreted to refer to multiple indices, * then all index resources are filtered by their open/closed status. 
*/ - private static Stream expandToOpenClosed(Context context, Stream resources) { + private static Stream expandToOpenClosed(Context context, Stream resources) { final IndexMetadata.State excludeState = excludeState(context.getOptions()); return resources.flatMap(indexAbstraction -> { if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) { - return Stream.of(new ResolvedExpression(indexAbstraction.getName())); + return Stream.of(indexAbstraction.getName()); } else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) { - return Stream.of(new ResolvedExpression(indexAbstraction.getName())); + return Stream.of(indexAbstraction.getName()); } else { Stream indicesStateStream = Stream.of(); if (shouldIncludeRegularIndices(context.getOptions())) { @@ -1451,20 +1434,18 @@ private static Stream expandToOpenClosed(Context context, St if (excludeState != null) { indicesStateStream = indicesStateStream.filter(indexMeta -> indexMeta.getState() != excludeState); } - return indicesStateStream.map(indexMeta -> new ResolvedExpression(indexMeta.getIndex().getName())); + return indicesStateStream.map(indexMeta -> indexMeta.getIndex().getName()); } }); } - private static List resolveEmptyOrTrivialWildcard(Context context) { + private static List resolveEmptyOrTrivialWildcard(Context context) { final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getState().metadata()); - Stream result; if (context.systemIndexAccessLevel == SystemIndexAccessLevel.ALL) { - result = Arrays.stream(allIndices); + return List.of(allIndices); } else { - result = resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices).stream(); + return resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(context, allIndices); } - return result.map(ResolvedExpression::new).toList(); } private static List resolveEmptyOrTrivialWildcardWithAllowedSystemIndices(Context context, String[] allIndices) { @@ -1526,8 +1507,8 @@ private DateMathExpressionResolver() { // utility class } - public static List resolve(Context context, List expressions) { - List result = new ArrayList<>(expressions.size()); + public static List resolve(Context context, List expressions) { + List result = new ArrayList<>(expressions.size()); for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { result.add(resolveExpression(expression, context::getStartTime)); } @@ -1538,15 +1519,13 @@ static String resolveExpression(String expression) { return resolveExpression(expression, System::currentTimeMillis); } - static ResolvedExpression resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { - String result; + static String resolveExpression(ExpressionList.Expression expression, LongSupplier getTime) { if (expression.isExclusion()) { // accepts date-math exclusions that are of the form "-<...{}>", i.e. the "-" is outside the "<>" date-math template - result = "-" + resolveExpression(expression.get(), getTime); + return "-" + resolveExpression(expression.get(), getTime); } else { - result = resolveExpression(expression.get(), getTime); + return resolveExpression(expression.get(), getTime); } - return new ResolvedExpression(result); } static String resolveExpression(String expression, LongSupplier getTime) { @@ -1708,26 +1687,25 @@ private ExplicitResourceNameFilter() { * Returns an expression list with "unavailable" (missing or not acceptable) resource names filtered out. 
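For reference, the `DateMathExpressionResolver` changes above keep the usual date-math behavior; only the return type went back to plain strings. A tiny, clock-dependent sketch using the public helper:

```java
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

public class DateMathDemo {
    public static void main(String[] args) {
        // Resolves against the current time; prints something like "logs-2024.10.22".
        System.out.println(IndexNameExpressionResolver.resolveDateMathExpression("<logs-{now/d}>"));
    }
}
```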
* Only explicit resource names are considered for filtering. Wildcard and exclusion expressions are kept in. */ - public static List<ResolvedExpression> filterUnavailable(Context context, List<ResolvedExpression> expressions) { + public static List<String> filterUnavailable(Context context, List<String> expressions) { ensureRemoteIndicesRequireIgnoreUnavailable(context.getOptions(), expressions); - List<ResolvedExpression> result = new ArrayList<>(expressions.size()); + List<String> result = new ArrayList<>(expressions.size()); for (ExpressionList.Expression expression : new ExpressionList(context, expressions)) { validateAliasOrIndex(expression); - if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression)) { - result.add(expression.resolvedExpression()); + if (expression.isWildcard() || expression.isExclusion() || ensureAliasOrIndexExists(context, expression.get())) { + result.add(expression.expression()); } } return result; } /** - * This returns `true` if the given {@param resolvedExpression} is of a resource that exists. - * Otherwise, it returns `false` if the `ignore_unavailable` option is `true`, or, if `false`, it throws a "not found" type of + * This returns `true` if the given {@param name} is of a resource that exists. + * Otherwise, it returns `false` if the `ignore_unavailable` option is `true`, or, if `false`, it throws a "not found" type of * exception. */ @Nullable - private static boolean ensureAliasOrIndexExists(Context context, ExpressionList.Expression expression) { - String name = expression.get(); + private static boolean ensureAliasOrIndexExists(Context context, String name) { boolean ignoreUnavailable = context.getOptions().ignoreUnavailable(); IndexAbstraction indexAbstraction = context.getState().getMetadata().getIndicesLookup().get(name); if (indexAbstraction == null) { @@ -1759,37 +1737,32 @@ private static boolean ensureAliasOrIndexExists(Context context, ExpressionList. } private static void validateAliasOrIndex(ExpressionList.Expression expression) { - if (Strings.isEmpty(expression.resolvedExpression().resource())) { - throw notFoundException(expression.get()); + if (Strings.isEmpty(expression.expression())) { + throw notFoundException(expression.expression()); } // Expressions can not start with an underscore. This is reserved for APIs. If the check gets here, the API // does not exist and the path is interpreted as an expression. If the expression begins with an underscore, // throw a specific error that is different from the [[IndexNotFoundException]], which is typically thrown // if the expression can't be found.
- if (expression.resolvedExpression().resource().charAt(0) == '_') { - throw new InvalidIndexNameException(expression.get(), "must not start with '_'."); + if (expression.expression().charAt(0) == '_') { + throw new InvalidIndexNameException(expression.expression(), "must not start with '_'."); } } - private static void ensureRemoteIndicesRequireIgnoreUnavailable( - IndicesOptions options, - List resolvedExpressions - ) { + private static void ensureRemoteIndicesRequireIgnoreUnavailable(IndicesOptions options, List indexExpressions) { if (options.ignoreUnavailable()) { return; } - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - var index = resolvedExpression.resource(); + for (String index : indexExpressions) { if (RemoteClusterAware.isRemoteIndexName(index)) { - failOnRemoteIndicesNotIgnoringUnavailable(resolvedExpressions); + failOnRemoteIndicesNotIgnoringUnavailable(indexExpressions); } } } - private static void failOnRemoteIndicesNotIgnoringUnavailable(List resolvedExpressions) { + private static void failOnRemoteIndicesNotIgnoringUnavailable(List indexExpressions) { List crossClusterIndices = new ArrayList<>(); - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - String index = resolvedExpression.resource(); + for (String index : indexExpressions) { if (RemoteClusterAware.isRemoteIndexName(index)) { crossClusterIndices.add(index); } @@ -1807,13 +1780,13 @@ public static final class ExpressionList implements Iterable expressionsList; private final boolean hasWildcard; - public record Expression(ResolvedExpression resolvedExpression, boolean isWildcard, boolean isExclusion) { + public record Expression(String expression, boolean isWildcard, boolean isExclusion) { public String get() { if (isExclusion()) { // drop the leading "-" if exclusion because it is easier for callers to handle it like this - return resolvedExpression().resource().substring(1); + return expression().substring(1); } else { - return resolvedExpression().resource(); + return expression(); } } } @@ -1822,17 +1795,16 @@ public String get() { * Creates the expression iterable that can be used to easily check which expression item is a wildcard or an exclusion (or both). * The {@param context} is used to check if wildcards ought to be considered or not. 
*/ - public ExpressionList(Context context, List resolvedExpressions) { - List expressionsList = new ArrayList<>(resolvedExpressions.size()); + public ExpressionList(Context context, List expressionStrings) { + List expressionsList = new ArrayList<>(expressionStrings.size()); boolean wildcardSeen = false; - for (ResolvedExpression resolvedExpression : resolvedExpressions) { - var expressionString = resolvedExpression.resource(); + for (String expressionString : expressionStrings) { boolean isExclusion = expressionString.startsWith("-") && wildcardSeen; if (context.getOptions().expandWildcardExpressions() && isWildcard(expressionString)) { wildcardSeen = true; - expressionsList.add(new Expression(resolvedExpression, true, isExclusion)); + expressionsList.add(new Expression(expressionString, true, isExclusion)); } else { - expressionsList.add(new Expression(resolvedExpression, false, isExclusion)); + expressionsList.add(new Expression(expressionString, false, isExclusion)); } } this.expressionsList = expressionsList; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 2df9cf706d892..5dbf4da6f376f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -425,7 +425,8 @@ public static ClusterState createFailureStoreIndex( .nameResolvedInstant(nameResolvedInstant) .performReroute(false) .setMatchingTemplate(template) - .settings(indexSettings); + .settings(indexSettings) + .isFailureIndex(true); try { currentState = metadataCreateIndexService.applyCreateIndexRequest( diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index 321719475c1f8..69e3b7b70ff82 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -983,6 +983,7 @@ static Settings aggregateIndexSettings( final Settings templateAndRequestSettings = Settings.builder().put(combinedTemplateSettings).put(request.settings()).build(); final IndexMode templateIndexMode = Optional.of(request) + .filter(r -> r.isFailureIndex() == false) .map(CreateIndexClusterStateUpdateRequest::matchingTemplate) .map(metadata::retrieveIndexModeFromTemplate) .orElse(null); @@ -991,6 +992,7 @@ static Settings aggregateIndexSettings( // additionalIndexSettings map final Settings.Builder additionalIndexSettings = Settings.builder(); final var resolvedAt = Instant.ofEpochMilli(request.getNameResolvedAt()); + Set overrulingSettings = new HashSet<>(); for (IndexSettingProvider provider : indexSettingProviders) { var newAdditionalSettings = provider.getAdditionalIndexSettings( request.index(), @@ -1003,46 +1005,57 @@ static Settings aggregateIndexSettings( ); validateAdditionalSettings(provider, newAdditionalSettings, additionalIndexSettings); additionalIndexSettings.put(newAdditionalSettings); + if (provider.overrulesTemplateAndRequestSettings()) { + overrulingSettings.addAll(newAdditionalSettings.keySet()); + } } - // For all the explicit settings, we go through the template and request level settings - // and see if either a template or the request has "cancelled out" an explicit default - 
// setting. For example, if a plugin had as an explicit setting: - // "index.mysetting": "blah - // And either a template or create index request had: - // "index.mysetting": null - // We want to remove the explicit setting not only from the explicitly set settings, but - // also from the template and request settings, so that from the newly create index's - // perspective it is as though the setting has not been set at all (using the default - // value). for (String explicitSetting : additionalIndexSettings.keys()) { - if (templateSettings.keys().contains(explicitSetting) && templateSettings.get(explicitSetting) == null) { - logger.debug( - "removing default [{}] setting as it in set to null in a template for [{}] creation", - explicitSetting, - request.index() - ); - additionalIndexSettings.remove(explicitSetting); + if (overrulingSettings.contains(explicitSetting)) { + // Remove any conflicting template and request settings to use the provided values. templateSettings.remove(explicitSetting); - } - if (requestSettings.keys().contains(explicitSetting) && requestSettings.get(explicitSetting) == null) { - logger.debug( - "removing default [{}] setting as it in set to null in the request for [{}] creation", - explicitSetting, - request.index() - ); - additionalIndexSettings.remove(explicitSetting); requestSettings.remove(explicitSetting); + } else { + // For all the explicit settings, we go through the template and request level settings + // and see if either a template or the request has "cancelled out" an explicit default + // setting. For example, if a plugin had as an explicit setting: + // "index.mysetting": "blah" + // And either a template or create index request had: + // "index.mysetting": null + // We want to remove the explicit setting not only from the explicitly set settings, but + // also from the template and request settings, so that from the newly created index's + // perspective it is as though the setting has not been set at all (using the default + // value).
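The comment block above describes the cancelling-out rule that the following hunk still implements: an explicit `null` at template or request level erases a provider-supplied default. Reduced to its core, reusing the hypothetical `index.mysetting` key from the comment:

```java
import org.elasticsearch.common.settings.Settings;

public class NullCancellationDemo {
    public static void main(String[] args) {
        Settings.Builder defaults = Settings.builder().put("index.mysetting", "blah");
        Settings.Builder template = Settings.builder().putNull("index.mysetting");
        // Mirrors the check in aggregateIndexSettings: an explicit null wins,
        // and the key vanishes from both sides.
        if (template.keys().contains("index.mysetting") && template.get("index.mysetting") == null) {
            defaults.remove("index.mysetting");
            template.remove("index.mysetting");
        }
        System.out.println(defaults.keys()); // prints [] - the default is gone
    }
}
```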
+ if (templateSettings.keys().contains(explicitSetting) && templateSettings.get(explicitSetting) == null) { + logger.debug( + "removing default [{}] setting as it is set to null in a template for [{}] creation", + explicitSetting, + request.index() + ); + additionalIndexSettings.remove(explicitSetting); + templateSettings.remove(explicitSetting); + } + if (requestSettings.keys().contains(explicitSetting) && requestSettings.get(explicitSetting) == null) { + logger.debug( + "removing default [{}] setting as it is set to null in the request for [{}] creation", + explicitSetting, + request.index() + ); + additionalIndexSettings.remove(explicitSetting); + requestSettings.remove(explicitSetting); + } } } // Finally, we actually add the explicit defaults prior to the template settings and the // request settings, so that the precedence goes: - // Explicit Defaults -> Template -> Request -> Necessary Settings (# of shards, uuid, etc) + // Explicit Defaults -> Template -> Request -> Filter out failure store settings -> Necessary Settings (# of shards, uuid, etc) indexSettingsBuilder.put(additionalIndexSettings.build()); indexSettingsBuilder.put(templateSettings.build()); } - + if (request.isFailureIndex()) { + DataStreamFailureStoreDefinition.filterUserDefinedSettings(indexSettingsBuilder); + } // now, put the request settings, so they override templates indexSettingsBuilder.put(requestSettings.build()); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index ccdfaa5518aee..d6ed28454df96 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1200,6 +1200,42 @@ static ClusterState innerPutTemplate( return ClusterState.builder(currentState).metadata(builder).build(); } + /** + * A private, local alternative to elements.stream().anyMatch(predicate) for micro-optimization reasons. + */ + private static boolean anyMatch(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return true; + } + } + return false; + } + + /** + * A private, local alternative to elements.stream().noneMatch(predicate) for micro-optimization reasons. + */ + private static boolean noneMatch(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return false; + } + } + return true; + } + + /** + * A private, local alternative to elements.stream().filter(predicate).findFirst() for micro-optimization reasons. + */ + private static Optional findFirst(final List elements, final Predicate predicate) { + for (T e : elements) { + if (predicate.test(e)) { + return Optional.of(e); + } + } + return Optional.empty(); + } + /** * Finds index templates whose index pattern matched with the given index name. In the case of * hidden indices, a template with a match all pattern or global template will not be returned. 
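The loop-based helpers above trade the stream pipeline for plain iteration on these hot template-matching paths; the results are identical by construction, as the following quick equivalence check shows:

```java
import java.util.List;
import java.util.function.Predicate;

class AnyMatchDemo {
    // Loop-based equivalent of elements.stream().anyMatch(predicate); avoids
    // allocating a Stream pipeline per call on hot paths.
    static <T> boolean anyMatch(List<T> elements, Predicate<T> predicate) {
        for (T e : elements) {
            if (predicate.test(e)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        List<String> patterns = List.of("logs-*", "metrics-*");
        System.out.println(anyMatch(patterns, p -> p.startsWith("logs")));          // true
        System.out.println(patterns.stream().anyMatch(p -> p.startsWith("logs"))); // true, same answer
    }
}
```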
@@ -1219,15 +1255,14 @@ public static List findV1Templates(Metadata metadata, Str final List matchedTemplates = new ArrayList<>(); for (IndexTemplateMetadata template : metadata.templates().values()) { if (isHidden == null || isHidden == Boolean.FALSE) { - final boolean matched = template.patterns().stream().anyMatch(patternMatchPredicate); - if (matched) { + if (anyMatch(template.patterns(), patternMatchPredicate)) { matchedTemplates.add(template); } } else { assert isHidden == Boolean.TRUE; - final boolean isNotMatchAllTemplate = template.patterns().stream().noneMatch(Regex::isMatchAllPattern); + final boolean isNotMatchAllTemplate = noneMatch(template.patterns(), Regex::isMatchAllPattern); if (isNotMatchAllTemplate) { - if (template.patterns().stream().anyMatch(patternMatchPredicate)) { + if (anyMatch(template.patterns(), patternMatchPredicate)) { matchedTemplates.add(template); } } @@ -1238,19 +1273,21 @@ public static List findV1Templates(Metadata metadata, Str // this is complex but if the index is not hidden in the create request but is hidden as the result of template application, // then we need to exclude global templates if (isHidden == null) { - final Optional templateWithHiddenSetting = matchedTemplates.stream() - .filter(template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings())) - .findFirst(); + final Optional templateWithHiddenSetting = findFirst( + matchedTemplates, + template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings()) + ); if (templateWithHiddenSetting.isPresent()) { final boolean templatedIsHidden = IndexMetadata.INDEX_HIDDEN_SETTING.get(templateWithHiddenSetting.get().settings()); if (templatedIsHidden) { // remove the global templates - matchedTemplates.removeIf(current -> current.patterns().stream().anyMatch(Regex::isMatchAllPattern)); + matchedTemplates.removeIf(current -> anyMatch(current.patterns(), Regex::isMatchAllPattern)); } // validate that hidden didn't change - final Optional templateWithHiddenSettingPostRemoval = matchedTemplates.stream() - .filter(template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings())) - .findFirst(); + final Optional templateWithHiddenSettingPostRemoval = findFirst( + matchedTemplates, + template -> IndexMetadata.INDEX_HIDDEN_SETTING.exists(template.settings()) + ); if (templateWithHiddenSettingPostRemoval.isEmpty() || templateWithHiddenSetting.get() != templateWithHiddenSettingPostRemoval.get()) { throw new IllegalStateException( @@ -1313,14 +1350,13 @@ static List> findV2CandidateTemplates(Met * built with a template that none of its indices match. */ if (isHidden == false || template.getDataStreamTemplate() != null) { - final boolean matched = template.indexPatterns().stream().anyMatch(patternMatchPredicate); - if (matched) { + if (anyMatch(template.indexPatterns(), patternMatchPredicate)) { candidates.add(Tuple.tuple(name, template)); } } else { - final boolean isNotMatchAllTemplate = template.indexPatterns().stream().noneMatch(Regex::isMatchAllPattern); + final boolean isNotMatchAllTemplate = noneMatch(template.indexPatterns(), Regex::isMatchAllPattern); if (isNotMatchAllTemplate) { - if (template.indexPatterns().stream().anyMatch(patternMatchPredicate)) { + if (anyMatch(template.indexPatterns(), patternMatchPredicate)) { candidates.add(Tuple.tuple(name, template)); } } @@ -1334,7 +1370,7 @@ static List> findV2CandidateTemplates(Met // Checks if a global template specifies the `index.hidden` setting. 
This check is important because a global // template shouldn't specify the `index.hidden` setting, we leave it up to the caller to handle this situation. private static boolean isGlobalAndHasIndexHiddenSetting(Metadata metadata, ComposableIndexTemplate template, String templateName) { - return template.indexPatterns().stream().anyMatch(Regex::isMatchAllPattern) + return anyMatch(template.indexPatterns(), Regex::isMatchAllPattern) && IndexMetadata.INDEX_HIDDEN_SETTING.exists(resolveSettings(metadata, templateName)); } diff --git a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java index a900722397edd..41998bf974bf9 100644 --- a/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java +++ b/server/src/main/java/org/elasticsearch/common/file/AbstractFileWatchingService.java @@ -313,12 +313,20 @@ void processSettingsOnServiceStartAndNotifyListeners() throws InterruptedExcepti void processSettingsAndNotifyListeners() throws InterruptedException { try { processFileChanges(); - for (var listener : eventListeners) { - listener.watchedFileChanged(); - } } catch (IOException | ExecutionException e) { - logger.error(() -> "Error processing watched file: " + watchedFile(), e); + onProcessFileChangesException(e); + return; } + for (var listener : eventListeners) { + listener.watchedFileChanged(); + } + } + + /** + * Called for checked exceptions only. + */ + protected void onProcessFileChangesException(Exception e) { + logger.error(() -> "Error processing watched file: " + watchedFile(), e); } // package private for testing diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 8cbacccb915ac..7bb78eabc8727 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -108,6 +108,7 @@ import org.elasticsearch.monitor.process.ProcessService; import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeRoleSettings; +import org.elasticsearch.node.ShutdownPrepareService; import org.elasticsearch.persistent.PersistentTasksClusterService; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.plugins.PluginsService; @@ -456,6 +457,8 @@ public void apply(Settings value, Settings current, Settings previous) { Environment.PATH_SHARED_DATA_SETTING, NodeEnvironment.NODE_ID_SEED_SETTING, Node.INITIAL_STATE_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_SHUTDOWN_TIMEOUT_SETTING, + ShutdownPrepareService.MAXIMUM_REINDEXING_TIMEOUT_SETTING, DiscoveryModule.DISCOVERY_TYPE_SETTING, DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING, DiscoveryModule.ELECTION_STRATEGY_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java b/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java index 208d29edad71d..288462ba3bbcb 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/BytesRefHash.java @@ -48,7 +48,7 @@ public BytesRefHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { boolean success = false; try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. 
- this.hashes = bigArrays.newIntArray(capacity, false); + this.hashes = bigArrays.newIntArray(maxSize, false); this.bytesRefs = new BytesRefArray(capacity, bigArrays); success = true; } finally { @@ -98,7 +98,7 @@ public BytesRefHash(BytesRefArray bytesRefs, float maxLoadFactor, BigArrays bigA boolean success = false; try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. - this.hashes = bigArrays.newIntArray(bytesRefs.size() + 1, false); + this.hashes = bigArrays.newIntArray(maxSize, false); this.bytesRefs = BytesRefArray.takeOwnershipOf(bytesRefs); success = true; } finally { @@ -182,7 +182,6 @@ private long set(BytesRef key, int code, long id) { private void append(long id, BytesRef key, int code) { assert size == id; bytesRefs.append(key); - hashes = bigArrays.grow(hashes, id + 1); hashes.set(id, code); } @@ -211,6 +210,7 @@ public long add(BytesRef key, int code) { if (size >= maxSize) { assert size == maxSize; grow(); + hashes = bigArrays.resize(hashes, maxSize); } assert size < maxSize; return set(key, rehash(code), size); diff --git a/server/src/main/java/org/elasticsearch/common/util/LongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongHash.java index 0c681063c50b0..3eeb60e419a19 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongHash.java @@ -33,7 +33,7 @@ public LongHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { super(capacity, maxLoadFactor, bigArrays); try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. - keys = bigArrays.newLongArray(capacity, false); + keys = bigArrays.newLongArray(maxSize, false); } finally { if (keys == null) { close(); @@ -78,7 +78,6 @@ private long set(long key, long id) { } private void append(long id, long key) { - keys = bigArrays.grow(keys, id + 1); keys.set(id, key); } @@ -102,6 +101,7 @@ public long add(long key) { if (size >= maxSize) { assert size == maxSize; grow(); + keys = bigArrays.resize(keys, maxSize); } assert size < maxSize; return set(key, size); diff --git a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java index f7708af59dde2..031794ed9c9c6 100644 --- a/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java +++ b/server/src/main/java/org/elasticsearch/common/util/LongLongHash.java @@ -40,7 +40,7 @@ public LongLongHash(long capacity, float maxLoadFactor, BigArrays bigArrays) { super(capacity, maxLoadFactor, bigArrays); try { // `super` allocates a big array so we have to `close` if we fail here or we'll leak it. 
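This hunk and the `LongLongHash` one below share a single idea: size the id-to-key array to `maxSize` up front and resize it only when the hash table itself grows, so the per-append `grow` call disappears. A simplified, plain-array illustration of the invariant (not the real BigArrays-backed code):

```java
import java.util.Arrays;

// Simplified model of the new allocation strategy in BytesRefHash/LongHash/LongLongHash.
class IdToKeyArrayDemo {
    long[] keys;
    int size;
    int maxSize;

    IdToKeyArrayDemo(int capacity, float maxLoadFactor) {
        this.maxSize = (int) (capacity * maxLoadFactor);
        this.keys = new long[maxSize]; // sized to maxSize up front; was: grown inside append
    }

    long add(long key) {
        if (size >= maxSize) {
            maxSize *= 2;                        // the table grew...
            keys = Arrays.copyOf(keys, maxSize); // ...so resize once, here, not per append
        }
        keys[size] = key; // append can now write unconditionally
        return size++;
    }
}
```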
- keys = bigArrays.newLongArray(2 * capacity, false); + keys = bigArrays.newLongArray(2 * maxSize, false); } finally { if (keys == null) { close(); @@ -99,7 +99,6 @@ private long set(long key1, long key2, long id) { private void append(long id, long key1, long key2) { long keyOffset = 2 * id; - keys = bigArrays.grow(keys, keyOffset + 2); keys.set(keyOffset, key1); keys.set(keyOffset + 1, key2); } @@ -128,6 +127,7 @@ public long add(long key1, long key2) { if (size >= maxSize) { assert size == maxSize; grow(); + keys = bigArrays.resize(keys, maxSize * 2); } assert size < maxSize; return set(key1, key2, size); diff --git a/server/src/main/java/org/elasticsearch/env/BuildVersion.java b/server/src/main/java/org/elasticsearch/env/BuildVersion.java index 0de346249ccbc..42c45a14977eb 100644 --- a/server/src/main/java/org/elasticsearch/env/BuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/BuildVersion.java @@ -59,7 +59,6 @@ public abstract class BuildVersion { public abstract boolean isFutureVersion(); // temporary - // TODO[wrb]: remove from PersistedClusterStateService // TODO[wrb]: remove from security bootstrap checks @Deprecated public Version toVersion() { diff --git a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java index e0531b5a192a0..dcc5ed3aee3f8 100644 --- a/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java +++ b/server/src/main/java/org/elasticsearch/env/DefaultBuildVersion.java @@ -72,6 +72,6 @@ public int hashCode() { @Override public String toString() { - return Version.fromId(versionId).toString(); + return version.toString(); } } diff --git a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java index 6a72a7e7fcda5..5b2ee39c1b622 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeMetadata.java +++ b/server/src/main/java/org/elasticsearch/env/NodeMetadata.java @@ -42,7 +42,6 @@ public final class NodeMetadata { private final IndexVersion oldestIndexVersion; - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) // version should be non-null in the node metadata from v9 onwards private NodeMetadata( final String nodeId, final BuildVersion buildVersion, @@ -112,11 +111,7 @@ public IndexVersion oldestIndexVersion() { return oldestIndexVersion; } - @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA) public void verifyUpgradeToCurrentVersion() { - // Enable the following assertion for V9: - // assert (nodeVersion.equals(BuildVersion.empty()) == false) : "version is required in the node metadata from v9 onwards"; - if (nodeVersion.onOrAfterMinimumCompatible() == false) { throw new IllegalStateException( "cannot upgrade a node from version [" diff --git a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java index 96158965cddfe..1ddc8d5b26bd9 100644 --- a/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java +++ b/server/src/main/java/org/elasticsearch/env/OverrideNodeVersionCommand.java @@ -74,7 +74,7 @@ protected void processDataPaths(Terminal terminal, Path[] paths, OptionSet optio "found [" + nodeMetadata + "] which is compatible with current version [" - + Version.CURRENT + + BuildVersion.current() + "], so there is no need to override the version checks" ); } catch (IllegalStateException e) { @@ -86,10 +86,10 @@ protected void processDataPaths(Terminal terminal, Path[] 
paths, OptionSet optio (nodeMetadata.nodeVersion().onOrAfterMinimumCompatible() == false ? TOO_OLD_MESSAGE : TOO_NEW_MESSAGE).replace( "V_OLD", nodeMetadata.nodeVersion().toString() - ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", Version.CURRENT.toString()) + ).replace("V_NEW", nodeMetadata.nodeVersion().toString()).replace("V_CUR", BuildVersion.current().toString()) ); - PersistedClusterStateService.overrideVersion(Version.CURRENT, paths); + PersistedClusterStateService.overrideVersion(BuildVersion.current(), paths); terminal.println(SUCCESS_MESSAGE); } diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index 0c6cf2c8a0761..92b8686700a05 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -42,7 +42,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.InfoStream; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -159,8 +158,6 @@ public class PersistedClusterStateService { public static final int IS_LAST_PAGE = 1; public static final int IS_NOT_LAST_PAGE = 0; private static final int COMMIT_DATA_SIZE = 7; - // We added CLUSTER_UUID_KEY and CLUSTER_UUID_COMMITTED_KEY in 8.8 - private static final int COMMIT_DATA_SIZE_BEFORE_8_8 = 5; private static final MergePolicy NO_MERGE_POLICY = noMergePolicy(); private static final MergePolicy DEFAULT_MERGE_POLICY = defaultMergePolicy(); @@ -350,7 +347,7 @@ public record OnDiskStateMetadata( @Nullable public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { String nodeId = null; - Version version = null; + BuildVersion version = null; IndexVersion oldestIndexVersion = IndexVersions.ZERO; for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); @@ -367,7 +364,7 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { ); } else if (nodeId == null) { nodeId = thisNodeId; - version = Version.fromId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); + version = BuildVersion.fromVersionId(Integer.parseInt(userData.get(NODE_VERSION_KEY))); if (userData.containsKey(OLDEST_INDEX_VERSION_KEY)) { oldestIndexVersion = IndexVersion.fromId(Integer.parseInt(userData.get(OLDEST_INDEX_VERSION_KEY))); } else { @@ -382,14 +379,13 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException { if (nodeId == null) { return null; } - // TODO: remove use of Version here (ES-7343) - return new NodeMetadata(nodeId, BuildVersion.fromVersionId(version.id()), oldestIndexVersion); + return new NodeMetadata(nodeId, version, oldestIndexVersion); } /** * Overrides the version field for the metadata in the given data path */ - public static void overrideVersion(Version newVersion, Path... dataPaths) throws IOException { + public static void overrideVersion(BuildVersion newVersion, Path... dataPaths) throws IOException { for (final Path dataPath : dataPaths) { final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME); if (Files.exists(indexPath)) { @@ -399,7 +395,7 @@ public static void overrideVersion(Version newVersion, Path... 
dataPaths) throws try (IndexWriter indexWriter = createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) { final Map commitData = new HashMap<>(userData); - commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id())); commitData.put(OVERRIDDEN_NODE_VERSION_KEY, Boolean.toString(true)); indexWriter.setLiveCommitData(commitData.entrySet()); indexWriter.commit(); @@ -664,11 +660,9 @@ public OnDiskStateMetadata loadOnDiskStateMetadataFromUserData(Map commitData = Maps.newMapWithExpectedSize(COMMIT_DATA_SIZE); commitData.put(CURRENT_TERM_KEY, Long.toString(currentTerm)); commitData.put(LAST_ACCEPTED_VERSION_KEY, Long.toString(lastAcceptedVersion)); - commitData.put(NODE_VERSION_KEY, Integer.toString(Version.CURRENT.id)); + commitData.put(NODE_VERSION_KEY, Integer.toString(BuildVersion.current().id())); commitData.put(OLDEST_INDEX_VERSION_KEY, Integer.toString(oldestIndexVersion.id())); commitData.put(NODE_ID_KEY, nodeId); commitData.put(CLUSTER_UUID_KEY, clusterUUID); diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java index 0180d2c8df119..6a553d5dc5440 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettingProvider.java @@ -57,4 +57,15 @@ Settings getAdditionalIndexSettings( record Parameters(CheckedFunction mapperServiceFactory) { } + + /** + * Indicates whether the additional settings that this provider returns can overrule the settings defined in a matching template + * or in the create index request. + * + * Note that this is not used during index template validation, to avoid overruling template settings that may apply to + * different contexts (e.g. the provider is not used, or it returns different setting values).
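A provider that returns `true` from the default method declared below wins over colliding template and request settings; the `aggregateIndexSettings` hunk earlier strips its keys from both. A reduced sketch of that precedence, with an illustrative `index.refresh_interval` collision:

```java
import java.util.HashSet;
import java.util.Set;

import org.elasticsearch.common.settings.Settings;

public class OverrulingSettingsDemo {
    public static void main(String[] args) {
        // What a hypothetical overruling provider contributed:
        Settings providerSettings = Settings.builder().put("index.refresh_interval", "5s").build();
        boolean overrules = true; // i.e. overrulesTemplateAndRequestSettings() returned true
        Settings.Builder templateSettings = Settings.builder().put("index.refresh_interval", "1s");
        Settings.Builder requestSettings = Settings.builder().put("index.refresh_interval", "30s");
        Set<String> overruling = new HashSet<>();
        if (overrules) {
            overruling.addAll(providerSettings.keySet());
        }
        // Mirrors aggregateIndexSettings: conflicting template/request values are dropped,
        // so the provider's "5s" is what ends up on the index.
        for (String key : overruling) {
            templateSettings.remove(key);
            requestSettings.remove(key);
        }
        System.out.println(templateSettings.keys() + " " + requestSettings.keys()); // prints "[] []"
    }
}
```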
+ */ + default boolean overrulesTemplateAndRequestSettings() { + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java index d5c94de1c6942..a7560ce6f3caf 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java +++ b/server/src/main/java/org/elasticsearch/index/codec/tsdb/ES87TSDBDocValuesProducer.java @@ -132,7 +132,7 @@ public BinaryDocValues getBinary(FieldInfo field) throws IOException { return DocValues.emptyBinary(); } - final IndexInput bytesSlice = data.slice("fixed-binary", entry.dataOffset, entry.dataLength); + final RandomAccessInput bytesSlice = data.randomAccessSlice(entry.dataOffset, entry.dataLength); if (entry.docsWithFieldOffset == -1) { // dense @@ -144,8 +144,7 @@ public BinaryDocValues getBinary(FieldInfo field) throws IOException { @Override public BytesRef binaryValue() throws IOException { - bytesSlice.seek((long) doc * length); - bytesSlice.readBytes(bytes.bytes, 0, length); + bytesSlice.readBytes((long) doc * length, bytes.bytes, 0, length); return bytes; } }; @@ -160,8 +159,7 @@ public BytesRef binaryValue() throws IOException { public BytesRef binaryValue() throws IOException { long startOffset = addresses.get(doc); bytes.length = (int) (addresses.get(doc + 1L) - startOffset); - bytesSlice.seek(startOffset); - bytesSlice.readBytes(bytes.bytes, 0, bytes.length); + bytesSlice.readBytes(startOffset, bytes.bytes, 0, bytes.length); return bytes; } }; @@ -184,8 +182,7 @@ public BytesRef binaryValue() throws IOException { @Override public BytesRef binaryValue() throws IOException { - bytesSlice.seek((long) disi.index() * length); - bytesSlice.readBytes(bytes.bytes, 0, length); + bytesSlice.readBytes((long) disi.index() * length, bytes.bytes, 0, length); return bytes; } }; @@ -201,8 +198,7 @@ public BytesRef binaryValue() throws IOException { final int index = disi.index(); long startOffset = addresses.get(index); bytes.length = (int) (addresses.get(index + 1L) - startOffset); - bytesSlice.seek(startOffset); - bytesSlice.readBytes(bytes.bytes, 0, bytes.length); + bytesSlice.readBytes(startOffset, bytes.bytes, 0, bytes.length); return bytes; } }; @@ -407,7 +403,7 @@ private static class TermsDict extends BaseTermsEnum { final IndexInput bytes; final long blockMask; final LongValues indexAddresses; - final IndexInput indexBytes; + final RandomAccessInput indexBytes; final BytesRef term; long ord = -1; @@ -427,7 +423,7 @@ private static class TermsDict extends BaseTermsEnum { entry.termsIndexAddressesLength ); indexAddresses = DirectMonotonicReader.getInstance(entry.termsIndexAddressesMeta, indexAddressesSlice); - indexBytes = data.slice("terms-index", entry.termsIndexOffset, entry.termsIndexLength); + indexBytes = data.randomAccessSlice(entry.termsIndexOffset, entry.termsIndexLength); term = new BytesRef(entry.maxTermLength); // add the max term length for the dictionary @@ -485,8 +481,7 @@ private BytesRef getTermFromIndex(long index) throws IOException { assert index >= 0 && index <= (entry.termsDictSize - 1) >>> entry.termsDictIndexShift; final long start = indexAddresses.get(index); term.length = (int) (indexAddresses.get(index + 1) - start); - indexBytes.seek(start); - indexBytes.readBytes(term.bytes, 0, term.length); + indexBytes.readBytes(start, term.bytes, 0, term.length); return term; } diff --git 
a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java index 3512989c115ee..c38b5beeb55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/AbstractGeometryFieldMapper.java @@ -189,8 +189,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { protected BlockLoader blockLoaderFromSource(BlockLoaderContext blContext) { ValueFetcher fetcher = valueFetcher(blContext.sourcePaths(name()), nullValue, GeometryFormatterFactory.WKB); // TODO consider optimization using BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.GeometriesBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } protected abstract Object nullValueAsSource(T nullValue); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java index 105943c732a5e..19a1cce746172 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockSourceReader.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.Set; /** * Loads values from {@code _source}. This whole process is very slow and cast-tastic, @@ -30,14 +29,6 @@ * slow. */ public abstract class BlockSourceReader implements BlockLoader.RowStrideReader { - - // _ignored_source is needed when source mode is synthetic. - static final StoredFieldsSpec NEEDS_SOURCE_AND_IGNORED_SOURCE = new StoredFieldsSpec( - true, - false, - Set.of(IgnoredSourceFieldMapper.NAME) - ); - private final ValueFetcher fetcher; private final List ignoredValues = new ArrayList<>(); private final DocIdSetIterator iter; @@ -100,12 +91,10 @@ public interface LeafIteratorLookup { private abstract static class SourceBlockLoader implements BlockLoader { protected final ValueFetcher fetcher; private final LeafIteratorLookup lookup; - private final SourceFieldMapper.Mode sourceMode; - private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { + private SourceBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { this.fetcher = fetcher; this.lookup = lookup; - this.sourceMode = sourceMode; } @Override @@ -115,7 +104,7 @@ public final ColumnAtATimeReader columnAtATimeReader(LeafReaderContext context) @Override public final StoredFieldsSpec rowStrideStoredFieldSpec() { - return sourceMode == SourceFieldMapper.Mode.SYNTHETIC ? NEEDS_SOURCE_AND_IGNORED_SOURCE : StoredFieldsSpec.NEEDS_SOURCE; + return StoredFieldsSpec.NEEDS_SOURCE; } @Override @@ -151,8 +140,8 @@ public final String toString() { * Load {@code boolean}s from {@code _source}. 
*/ public static class BooleansBlockLoader extends SourceBlockLoader { - public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BooleansBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -191,8 +180,8 @@ public String toString() { * Load {@link BytesRef}s from {@code _source}. */ public static class BytesRefsBlockLoader extends SourceBlockLoader { - public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public BytesRefsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -202,7 +191,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) throws IOException { - return new BytesRefs(fetcher, iter, null); + return new BytesRefs(fetcher, iter); } @Override @@ -212,8 +201,8 @@ protected String name() { } public static class GeometriesBlockLoader extends SourceBlockLoader { - public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public GeometriesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -223,7 +212,7 @@ public final Builder builder(BlockFactory factory, int expectedCount) { @Override protected RowStrideReader rowStrideReader(LeafReaderContext context, DocIdSetIterator iter) { - return new Geometries(fetcher, iter, null); + return new Geometries(fetcher, iter); } @Override @@ -235,7 +224,7 @@ protected String name() { private static class BytesRefs extends BlockSourceReader { private final BytesRef scratch = new BytesRef(); - BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter, SourceFieldMapper.Mode sourceMode) { + BytesRefs(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -252,7 +241,7 @@ public String toString() { private static class Geometries extends BlockSourceReader { - Geometries(ValueFetcher fetcher, DocIdSetIterator iter, SourceFieldMapper.Mode sourceMode) { + Geometries(ValueFetcher fetcher, DocIdSetIterator iter) { super(fetcher, iter); } @@ -275,8 +264,8 @@ public String toString() { * Load {@code double}s from {@code _source}. */ public static class DoublesBlockLoader extends SourceBlockLoader { - public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public DoublesBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -315,8 +304,8 @@ public String toString() { * Load {@code int}s from {@code _source}. */ public static class IntsBlockLoader extends SourceBlockLoader { - public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public IntsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override @@ -355,8 +344,8 @@ public String toString() { * Load {@code long}s from {@code _source}. 
*/ public static class LongsBlockLoader extends SourceBlockLoader { - public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup, SourceFieldMapper.Mode sourceMode) { - super(fetcher, lookup, sourceMode); + public LongsBlockLoader(ValueFetcher fetcher, LeafIteratorLookup lookup) { + super(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index c2bf9e18bfeec..5aaaf7dce83c9 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -314,7 +314,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isIndexed() || isStored() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup, blContext.indexSettings().getIndexMappingSourceMode()); + return new BlockSourceReader.BooleansBlockLoader(fetcher, lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index d05f0e477db09..87e4ce5f90479 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -793,8 +793,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index bac987a3df96d..1ed0a117ddd89 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -810,8 +810,10 @@ private static void parseNonDynamicArray( boolean objectWithFallbackSyntheticSource = false; if (mapper instanceof ObjectMapper objectMapper) { mode = getSourceKeepMode(context, objectMapper.sourceKeepMode()); - objectWithFallbackSyntheticSource = (mode == Mapper.SourceKeepMode.ALL - || (mode == Mapper.SourceKeepMode.ARRAYS && objectMapper instanceof NestedObjectMapper == false)); + objectWithFallbackSyntheticSource = mode == Mapper.SourceKeepMode.ALL + // Inside nested objects we always store object arrays as a workaround for #115261. 
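+ // (context.inNestedScope() is true once the parser has entered a nested document; the new Scope
+ // enum in DocumentParserContext below replaces the old inArrayScope boolean and tracks this.)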
+ || ((context.inNestedScope() || mode == Mapper.SourceKeepMode.ARRAYS) + && objectMapper instanceof NestedObjectMapper == false); } boolean fieldWithFallbackSyntheticSource = false; boolean fieldWithStoredArraySource = false; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index ef87ce52fbabf..3b1f1a6d2809a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -104,6 +104,16 @@ public int get() { } } + /** + * Defines the scope parser is currently in. + * This is used for synthetic source related logic during parsing. + */ + private enum Scope { + SINGLETON, + ARRAY, + NESTED + } + private final MappingLookup mappingLookup; private final MappingParserContext mappingParserContext; private final SourceToParse sourceToParse; @@ -112,7 +122,7 @@ public int get() { private final List ignoredFieldValues; private final List ignoredFieldsMissingValues; private boolean inArrayScopeEnabled; - private boolean inArrayScope; + private Scope currentScope; private final Map> dynamicMappers; private final DynamicMapperSize dynamicMappersSize; @@ -145,7 +155,7 @@ private DocumentParserContext( List ignoredFieldValues, List ignoredFieldsWithNoSource, boolean inArrayScopeEnabled, - boolean inArrayScope, + Scope currentScope, Map> dynamicMappers, Map dynamicObjectMappers, Map> dynamicRuntimeFields, @@ -167,7 +177,7 @@ private DocumentParserContext( this.ignoredFieldValues = ignoredFieldValues; this.ignoredFieldsMissingValues = ignoredFieldsWithNoSource; this.inArrayScopeEnabled = inArrayScopeEnabled; - this.inArrayScope = inArrayScope; + this.currentScope = currentScope; this.dynamicMappers = dynamicMappers; this.dynamicObjectMappers = dynamicObjectMappers; this.dynamicRuntimeFields = dynamicRuntimeFields; @@ -192,7 +202,7 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.ignoredFieldValues, in.ignoredFieldsMissingValues, in.inArrayScopeEnabled, - in.inArrayScope, + in.currentScope, in.dynamicMappers, in.dynamicObjectMappers, in.dynamicRuntimeFields, @@ -224,7 +234,7 @@ protected DocumentParserContext( new ArrayList<>(), new ArrayList<>(), mappingParserContext.getIndexSettings().isSyntheticSourceSecondDocParsingPassEnabled(), - false, + Scope.SINGLETON, new HashMap<>(), new HashMap<>(), new HashMap<>(), @@ -335,7 +345,7 @@ public final void deduplicateIgnoredFieldValues(final Set fullNames) { public final DocumentParserContext addIgnoredFieldFromContext(IgnoredSourceFieldMapper.NameValue ignoredFieldWithNoSource) throws IOException { if (canAddIgnoredField()) { - if (inArrayScope) { + if (currentScope == Scope.ARRAY) { // The field is an array within an array, store all sub-array elements. 
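// (Scope.ARRAY is entered only via maybeCloneForArray further down; nested documents switch to
// Scope.NESTED instead, so their object arrays take the fallback path added in DocumentParser above.)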
ignoredFieldsMissingValues.add(ignoredFieldWithNoSource); return cloneWithRecordedSource(); @@ -379,10 +389,10 @@ public final DocumentParserContext maybeCloneForArray(Mapper mapper) throws IOEx if (canAddIgnoredField() && mapper instanceof ObjectMapper && mapper instanceof NestedObjectMapper == false - && inArrayScope == false + && currentScope != Scope.ARRAY && inArrayScopeEnabled) { DocumentParserContext subcontext = switchParser(parser()); - subcontext.inArrayScope = true; + subcontext.currentScope = Scope.ARRAY; return subcontext; } return this; @@ -673,6 +683,10 @@ public boolean isWithinCopyTo() { return false; } + public boolean inNestedScope() { + return currentScope == Scope.NESTED; + } + public final DocumentParserContext createChildContext(ObjectMapper parent) { return new Wrapper(parent, this); } @@ -716,10 +730,11 @@ public LuceneDocument doc() { return document; } }; - // Disable tracking array scopes for ignored source, as it would be added to the parent doc. - // Nested documents are added to preserve object structure within arrays of objects, so the use - // of ignored source for arrays inside them should be mostly redundant. - cloned.inArrayScope = false; + + cloned.currentScope = Scope.NESTED; + // Disable using second parsing pass since it currently can not determine which parts + // of source belong to which nested document. + // See #115261. cloned.inArrayScopeEnabled = false; return cloned; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java index 296c2c5311d9a..70d73fc2ffb9a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapper.java @@ -58,6 +58,9 @@ public class IgnoredSourceFieldMapper extends MetadataFieldMapper { static final NodeFeature TRACK_IGNORED_SOURCE = new NodeFeature("mapper.track_ignored_source"); static final NodeFeature DONT_EXPAND_DOTS_IN_IGNORED_SOURCE = new NodeFeature("mapper.ignored_source.dont_expand_dots"); + static final NodeFeature ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS = new NodeFeature( + "mapper.ignored_source.always_store_object_arrays_in_nested" + ); /* Setting to disable encoding and writing values for this field. 
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java index 802680e7f373e..ecc708bc94614 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/KeywordFieldMapper.java @@ -634,8 +634,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { return new BlockStoredFieldsReader.BytesFromBytesRefsBlockLoader(name()); } SourceValueFetcher fetcher = sourceValueFetcher(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, sourceBlockLoaderLookup(blContext)); } private BlockSourceReader.LeafIteratorLookup sourceBlockLoaderLookup(BlockLoaderContext blContext) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java index 31c89b2fc8ad4..026c7c98d7aeb 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperFeatures.java @@ -62,7 +62,8 @@ public Set getTestFeatures() { return Set.of( RangeFieldMapper.DATE_RANGE_INDEXING_FIX, IgnoredSourceFieldMapper.DONT_EXPAND_DOTS_IN_IGNORED_SOURCE, - SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION + SourceFieldMapper.REMOVE_SYNTHETIC_SOURCE_ONLY_VALIDATION, + IgnoredSourceFieldMapper.ALWAYS_STORE_OBJECT_ARRAYS_IN_NESTED_OBJECTS ); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 3608e8ab261c1..55ed1e10428aa 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -462,12 +462,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, FLOAT("float", NumericType.FLOAT) { @@ -650,12 +646,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, DOUBLE("double", NumericType.DOUBLE) { @@ -804,12 +796,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new 
BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup); } }, BYTE("byte", NumericType.BYTE) { @@ -921,12 +909,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1038,12 +1022,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1229,12 +1209,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup); } }, LONG("long", NumericType.LONG) { @@ -1380,12 +1356,8 @@ BlockLoader blockLoaderFromDocValues(String fieldName) { } @Override - BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ) { - return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup, sourceMode); + BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) { + return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup); } private boolean isOutOfRange(Object value) { @@ -1663,11 +1635,7 @@ protected void writeValue(XContentBuilder b, long value) throws IOException { abstract BlockLoader blockLoaderFromDocValues(String fieldName); - abstract BlockLoader blockLoaderFromSource( - SourceValueFetcher sourceValueFetcher, - BlockSourceReader.LeafIteratorLookup lookup, - SourceFieldMapper.Mode sourceMode - ); + abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup); } public static class NumberFieldType extends SimpleMappedFieldType { @@ -1806,8 +1774,7 @@ public BlockLoader blockLoader(BlockLoaderContext blContext) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup, sourceMode); + return type.blockLoaderFromSource(sourceValueFetcher(blContext.sourcePaths(name())), lookup); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 3f77edc819602..253f70f4fda47 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -1007,20 +1007,8 @@ protected String delegatingTo() { if (isStored()) { return new BlockStoredFieldsReader.BytesFromStringsBlockLoader(name()); } - if (isSyntheticSource && syntheticSourceDelegate == null) { - /* - * When we're in synthetic source mode we don't currently - * support text fields that are not stored and are not children - * of perfect keyword fields. We'd have to load from the parent - * field and then convert the result to a string. In this case, - * even if we would synthesize the source, the current field - * would be missing. - */ - return null; - } SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name())); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, blockReaderDisiLookup(blContext)); } /** diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 2dc5e7c28ad0b..706f788e8a310 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -38,7 +38,6 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource; @@ -1714,7 +1713,7 @@ interface IndexDeletionAllowedPredicate { IndexSettings indexSettings) -> canDeleteIndexContents(index); private final IndexDeletionAllowedPredicate ALWAYS_TRUE = (Index index, IndexSettings indexSettings) -> true; - public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { + public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { /* Being static, parseAliasFilter doesn't have access to whatever guts it needs to parse a query. Instead of passing in a bunch * of dependencies we pass in a function that can perform the parsing. 
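* (Note: the Set parameter of this method reverts from ResolvedExpression wrappers to plain
* index-name strings, matching the ResolvedExpression import removed at the top of this hunk.)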
*/ CheckedFunction filterParser = bytes -> { diff --git a/server/src/main/java/org/elasticsearch/inference/InferenceService.java b/server/src/main/java/org/elasticsearch/inference/InferenceService.java index d437533a8603d..2c99563955746 100644 --- a/server/src/main/java/org/elasticsearch/inference/InferenceService.java +++ b/server/src/main/java/org/elasticsearch/inference/InferenceService.java @@ -210,4 +210,8 @@ default List defaultConfigIds() { default void defaultConfigs(ActionListener> defaultsListener) { defaultsListener.onResponse(List.of()); } + + default void updateModelsWithDynamicFields(List model, ActionListener> listener) { + listener.onResponse(model); + } } diff --git a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java index 5059272aa2e23..97a68d9807688 100644 --- a/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java +++ b/server/src/main/java/org/elasticsearch/ingest/ConfigurationUtils.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -239,7 +240,7 @@ private static Boolean readBoolean(String processorType, String processorTag, St processorType, processorTag, propertyName, - "property isn't a boolean, but of type [" + value.getClass().getName() + "]" + Strings.format("property isn't a boolean, but of type [%s]", value.getClass().getName()) ); } diff --git a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java index de172d86b810d..7406ee8837264 100644 --- a/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/elasticsearch/ingest/PipelineConfiguration.java @@ -99,18 +99,13 @@ BytesReference getConfig() { } public Integer getVersion() { - var configMap = getConfigAsMap(); - if (configMap.containsKey("version")) { - Object o = configMap.get("version"); - if (o == null) { - return null; - } else if (o instanceof Number number) { - return number.intValue(); - } else { - throw new IllegalStateException("unexpected version type [" + o.getClass().getName() + "]"); - } - } else { + Object o = getConfigAsMap().get("version"); + if (o == null) { return null; + } else if (o instanceof Number number) { + return number.intValue(); + } else { + throw new IllegalStateException("unexpected version type [" + o.getClass().getName() + "]"); } } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 32a65302922a8..e30f76fdd9414 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -13,10 +13,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.search.TransportSearchAction; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.RefCountingListener; -import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; 
import org.elasticsearch.client.internal.Client; @@ -82,7 +78,6 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.tasks.TaskCancellationService; -import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.tasks.TaskResultsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; @@ -106,18 +101,12 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.function.BiConsumer; import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; import javax.net.ssl.SNIHostName; -import static org.elasticsearch.core.Strings.format; - /** * A node represent a node within a cluster ({@code cluster.name}). The {@link #client()} can be used * in order to use a {@link Client} to perform actions/operations against the cluster. @@ -161,12 +150,6 @@ public class Node implements Closeable { Property.NodeScope ); - public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( - "node.maximum_shutdown_grace_period", - TimeValue.ZERO, - Setting.Property.NodeScope - ); - private final Lifecycle lifecycle = new Lifecycle(); /** @@ -187,6 +170,7 @@ public class Node implements Closeable { private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; private final TerminationHandler terminationHandler; + // for testing final NamedWriteableRegistry namedWriteableRegistry; final NamedXContentRegistry namedXContentRegistry; @@ -606,105 +590,8 @@ public synchronized void close() throws IOException { * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. 
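* As of this change the method body below is a thin wrapper: the actual stop-and-wait logic is
* delegated to {@link ShutdownPrepareService#prepareForShutdown}, introduced later in this diff.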
*/ public void prepareForClose() { - final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(this.settings()); - - record Stopper(String name, SubscribableListener listener) { - boolean isIncomplete() { - return listener().isDone() == false; - } - } - - final var stoppers = new ArrayList(); - final var allStoppersFuture = new PlainActionFuture(); - try (var listeners = new RefCountingListener(allStoppersFuture)) { - final BiConsumer stopperRunner = (name, action) -> { - final var stopper = new Stopper(name, new SubscribableListener<>()); - stoppers.add(stopper); - stopper.listener().addListener(listeners.acquire()); - new Thread(() -> { - try { - action.run(); - } catch (Exception ex) { - logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); - } finally { - stopper.listener().onResponse(null); - } - }, stopper.name()).start(); - }; - - stopperRunner.accept("http-server-transport-stop", injector.getInstance(HttpServerTransport.class)::close); - stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout)); - if (terminationHandler != null) { - stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); - } - } - - final Supplier incompleteStoppersDescriber = () -> stoppers.stream() - .filter(Stopper::isIncomplete) - .map(Stopper::name) - .collect(Collectors.joining(", ", "[", "]")); - - try { - if (TimeValue.ZERO.equals(maxTimeout)) { - allStoppersFuture.get(); - } else { - allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); - } - } catch (ExecutionException e) { - assert false : e; // listeners are never completed exceptionally - logger.warn("failed during graceful shutdown tasks", e); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); - } catch (TimeoutException e) { - logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); - } - } - - private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout) { - TaskManager taskManager = injector.getInstance(TransportService.class).getTaskManager(); - long millisWaited = 0; - while (true) { - long searchTasksRemaining = taskManager.getTasks() - .values() - .stream() - .filter(task -> TransportSearchAction.TYPE.name().equals(task.getAction())) - .count(); - if (searchTasksRemaining == 0) { - logger.debug("all search tasks complete"); - return; - } else { - // Let the system work on those searches for a while. We're on a dedicated thread to manage app shutdown, so we - // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short - // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could - // be spending on finishing those searches. 
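(The polling loop removed below is not lost: it reappears in ShutdownPrepareService#awaitTasksComplete, generalized from search tasks to any task action name, so the same code now also covers reindex tasks.)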
- final TimeValue pollPeriod = TimeValue.timeValueMillis(500); - millisWaited += pollPeriod.millis(); - if (TimeValue.ZERO.equals(asyncSearchTimeout) == false && millisWaited >= asyncSearchTimeout.millis()) { - logger.warn( - format( - "timed out after waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - logger.debug(format("waiting for [%s] search tasks to finish, next poll in [%s]", searchTasksRemaining, pollPeriod)); - try { - Thread.sleep(pollPeriod.millis()); - } catch (InterruptedException ex) { - logger.warn( - format( - "interrupted while waiting [%s] for [%d] search tasks to finish", - asyncSearchTimeout.toString(), - searchTasksRemaining - ) - ); - return; - } - } - } + injector.getInstance(ShutdownPrepareService.class) + .prepareForShutdown(injector.getInstance(TransportService.class).getTaskManager()); } /** diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 8e66486329577..7e3991c1df1f4 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -1099,6 +1099,8 @@ private void construct( telemetryProvider.getTracer() ); + final ShutdownPrepareService shutdownPrepareService = new ShutdownPrepareService(settings, httpServerTransport, terminationHandler); + modules.add( loadPersistentTasksService( settingsModule, @@ -1200,6 +1202,7 @@ private void construct( b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); b.bind(DataStreamAutoShardingService.class).toInstance(dataStreamAutoShardingService); b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics); + b.bind(ShutdownPrepareService.class).toInstance(shutdownPrepareService); }); if (ReadinessService.enabled(environment)) { diff --git a/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java new file mode 100644 index 0000000000000..ab9537053f45d --- /dev/null +++ b/server/src/main/java/org/elasticsearch/node/ShutdownPrepareService.java @@ -0,0 +1,184 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.node; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.node.internal.TerminationHandler; +import org.elasticsearch.tasks.TaskManager; + +import java.util.ArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.elasticsearch.core.Strings.format; + +/** + * This class was created to extract out the logic from {@link Node#prepareForClose()} to facilitate testing. + *
<p>
+ * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. + *
<p>
+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ +public class ShutdownPrepareService { + + private final Logger logger = LogManager.getLogger(ShutdownPrepareService.class); + private final Settings settings; + private final HttpServerTransport httpServerTransport; + private final TerminationHandler terminationHandler; + private volatile boolean hasBeenShutdown = false; + + public ShutdownPrepareService(Settings settings, HttpServerTransport httpServerTransport, TerminationHandler terminationHandler) { + this.settings = settings; + this.httpServerTransport = httpServerTransport; + this.terminationHandler = terminationHandler; + } + + public static final Setting MAXIMUM_SHUTDOWN_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_shutdown_grace_period", + TimeValue.ZERO, + Setting.Property.NodeScope + ); + + public static final Setting MAXIMUM_REINDEXING_TIMEOUT_SETTING = Setting.positiveTimeSetting( + "node.maximum_reindexing_grace_period", + TimeValue.timeValueSeconds(10), + Setting.Property.NodeScope + ); + + /** + * Invokes hooks to prepare this node to be closed. This should be called when Elasticsearch receives a request to shut down + * gracefully from the underlying operating system, before system resources are closed. This method will block + * until the node is ready to shut down. + *
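+ * The wait is bounded by {@code node.maximum_shutdown_grace_period} (default 0, meaning wait
+ * indefinitely) and, for reindex tasks, {@code node.maximum_reindexing_grace_period} (default 10s),
+ * both registered above.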
<p>
+ * Note that this class is part of infrastructure to react to signals from the operating system - most graceful shutdown + * logic should use Node Shutdown, see {@link org.elasticsearch.cluster.metadata.NodesShutdownMetadata}. + */ + public void prepareForShutdown(TaskManager taskManager) { + assert hasBeenShutdown == false; + hasBeenShutdown = true; + final var maxTimeout = MAXIMUM_SHUTDOWN_TIMEOUT_SETTING.get(settings); + final var reindexTimeout = MAXIMUM_REINDEXING_TIMEOUT_SETTING.get(settings); + + record Stopper(String name, SubscribableListener listener) { + boolean isIncomplete() { + return listener().isDone() == false; + } + } + + final var stoppers = new ArrayList(); + final var allStoppersFuture = new PlainActionFuture(); + try (var listeners = new RefCountingListener(allStoppersFuture)) { + final BiConsumer stopperRunner = (name, action) -> { + final var stopper = new Stopper(name, new SubscribableListener<>()); + stoppers.add(stopper); + stopper.listener().addListener(listeners.acquire()); + new Thread(() -> { + try { + action.run(); + } catch (Exception ex) { + logger.warn("unexpected exception in shutdown task [" + stopper.name() + "]", ex); + } finally { + stopper.listener().onResponse(null); + } + }, stopper.name()).start(); + }; + + stopperRunner.accept("http-server-transport-stop", httpServerTransport::close); + stopperRunner.accept("async-search-stop", () -> awaitSearchTasksComplete(maxTimeout, taskManager)); + stopperRunner.accept("reindex-stop", () -> awaitReindexTasksComplete(reindexTimeout, taskManager)); + if (terminationHandler != null) { + stopperRunner.accept("termination-handler-stop", terminationHandler::handleTermination); + } + } + + final Supplier incompleteStoppersDescriber = () -> stoppers.stream() + .filter(Stopper::isIncomplete) + .map(Stopper::name) + .collect(Collectors.joining(", ", "[", "]")); + + try { + if (TimeValue.ZERO.equals(maxTimeout)) { + allStoppersFuture.get(); + } else { + allStoppersFuture.get(maxTimeout.millis(), TimeUnit.MILLISECONDS); + } + } catch (ExecutionException e) { + assert false : e; // listeners are never completed exceptionally + logger.warn("failed during graceful shutdown tasks", e); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.warn("interrupted while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get(), e); + } catch (TimeoutException e) { + logger.warn("timed out while waiting for graceful shutdown tasks: " + incompleteStoppersDescriber.get()); + } + } + + private void awaitTasksComplete(TimeValue timeout, String taskName, TaskManager taskManager) { + long millisWaited = 0; + while (true) { + long tasksRemaining = taskManager.getTasks().values().stream().filter(task -> taskName.equals(task.getAction())).count(); + if (tasksRemaining == 0) { + logger.debug("all " + taskName + " tasks complete"); + return; + } else { + // Let the system work on those tasks for a while. We're on a dedicated thread to manage app shutdown, so we + // literally just want to wait and not take up resources on this thread for now. Poll period chosen to allow short + // response times, but checking the tasks list is relatively expensive, and we don't want to waste CPU time we could + // be spending on finishing those tasks. 
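+ // (For scale: at the 500ms poll period below, the default 10s reindexing grace period allows at
+ // most ~20 polls before the wait is abandoned with a warning.)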
+ final TimeValue pollPeriod = TimeValue.timeValueMillis(500); + millisWaited += pollPeriod.millis(); + if (TimeValue.ZERO.equals(timeout) == false && millisWaited >= timeout.millis()) { + logger.warn( + format("timed out after waiting [%s] for [%d] " + taskName + " tasks to finish", timeout.toString(), tasksRemaining) + ); + return; + } + logger.debug(format("waiting for [%s] " + taskName + " tasks to finish, next poll in [%s]", tasksRemaining, pollPeriod)); + try { + Thread.sleep(pollPeriod.millis()); + } catch (InterruptedException ex) { + logger.warn( + format( + "interrupted while waiting [%s] for [%d] " + taskName + " tasks to finish", + timeout.toString(), + tasksRemaining + ) + ); + return; + } + } + } + } + + private void awaitSearchTasksComplete(TimeValue asyncSearchTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncSearchTimeout, TransportSearchAction.NAME, taskManager); + } + + private void awaitReindexTasksComplete(TimeValue asyncReindexTimeout, TaskManager taskManager) { + awaitTasksComplete(asyncReindexTimeout, ReindexAction.NAME, taskManager); + } + +} diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 811b59465ce76..601fc3c86d98f 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.coordination.FailedToCommitClusterStateException; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; @@ -144,7 +145,16 @@ private void processFileChanges(ReservedStateVersionCheck versionCheck) throws I } @Override - protected void processInitialFileMissing() throws ExecutionException, InterruptedException { + protected void onProcessFileChangesException(Exception e) { + if (e instanceof ExecutionException && e.getCause() instanceof FailedToCommitClusterStateException f) { + logger.error("Unable to commit cluster state", e); + } else { + super.onProcessFileChangesException(e); + } + } + + @Override + protected void processInitialFileMissing() throws ExecutionException, InterruptedException, IOException { PlainActionFuture completion = new PlainActionFuture<>(); logger.info("setting file [{}] not found, initializing [{}] as empty", watchedFile(), NAMESPACE); stateService.initEmpty(NAMESPACE, completion); diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java index 92e248f160f0f..c85997f72cc78 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/ReservedStateUpdateTask.java @@ -205,8 +205,8 @@ static boolean checkMetadataVersion( namespace, newVersion, switch (versionCheck) { - case ReservedStateVersionCheck.HIGHER_OR_SAME_VERSION -> "less than"; - case ReservedStateVersionCheck.HIGHER_VERSION_ONLY -> "less than or equal to"; + case HIGHER_OR_SAME_VERSION -> "less than"; + case HIGHER_VERSION_ONLY -> "less than or equal to"; }, 
currentVersion ) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 3a900a8a9b8a6..be96b4e25d841 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.CheckedSupplier; @@ -1619,7 +1618,7 @@ public boolean isForceExecution() { } } - public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { + public AliasFilter buildAliasFilter(ClusterState state, String index, Set resolvedExpressions) { return indicesService.buildAliasFilter(state, index, resolvedExpressions); } diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 6ceb02f0e797f..9c96319136007 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -2179,7 +2179,7 @@ public ActionRequestValidationException validate( boolean allowPartialSearchResults ) { if (retriever() != null) { - validationException = retriever().validate(this, validationException, allowPartialSearchResults); + validationException = retriever().validate(this, validationException, isScroll, allowPartialSearchResults); List specified = new ArrayList<>(); if (subSearches().isEmpty() == false) { specified.add(QUERY_FIELD.getPreferredName()); diff --git a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java index e994c55e43452..b15798db95b6f 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/CompoundRetrieverBuilder.java @@ -11,6 +11,8 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.search.MultiSearchRequest; @@ -18,9 +20,9 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.TransportMultiSearchAction; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.StoredFieldsContext; @@ -122,10 +124,17 @@ public final RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOExceptio public void onResponse(MultiSearchResponse items) { List topDocs = new ArrayList<>(); List failures = new ArrayList<>(); 
+ // capture the max status code returned by any of the responses + int statusCode = RestStatus.OK.getStatus(); + List retrieversWithFailures = new ArrayList<>(); for (int i = 0; i < items.getResponses().length; i++) { var item = items.getResponses()[i]; if (item.isFailure()) { failures.add(item.getFailure()); + retrieversWithFailures.add(innerRetrievers.get(i).retriever().getName()); + if (ExceptionsHelper.status(item.getFailure()).getStatus() > statusCode) { + statusCode = ExceptionsHelper.status(item.getFailure()).getStatus(); + } } else { assert item.getResponse() != null; var rankDocs = getRankDocs(item.getResponse()); @@ -134,7 +143,14 @@ public void onResponse(MultiSearchResponse items) { } } if (false == failures.isEmpty()) { - IllegalStateException ex = new IllegalStateException("Search failed - some nested retrievers returned errors."); + assert statusCode != RestStatus.OK.getStatus(); + final String errMessage = "[" + + getName() + + "] search failed - retrievers '" + + retrieversWithFailures + + "' returned errors. " + + "All failures are attached as suppressed exceptions."; + Exception ex = new ElasticsearchStatusException(errMessage, RestStatus.fromCode(statusCode)); failures.forEach(ex::addSuppressed); listener.onFailure(ex); } else { @@ -163,6 +179,11 @@ public final QueryBuilder topDocsQuery() { throw new IllegalStateException("Should not be called, missing a rewrite?"); } + @Override + public final QueryBuilder explainQuery() { + throw new IllegalStateException("Should not be called, missing a rewrite?"); + } + @Override public final void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { throw new IllegalStateException("Should not be called, missing a rewrite?"); @@ -172,9 +193,10 @@ public final void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceB public ActionRequestValidationException validate( SearchSourceBuilder source, ActionRequestValidationException validationException, + boolean isScroll, boolean allowPartialSearchResults ) { - validationException = super.validate(source, validationException, allowPartialSearchResults); + validationException = super.validate(source, validationException, isScroll, allowPartialSearchResults); if (source.size() > rankWindowSize) { validationException = addValidationError( "[" @@ -190,12 +212,15 @@ public ActionRequestValidationException validate( } if (allowPartialSearchResults) { validationException = addValidationError( - "cannot specify a compound retriever and [allow_partial_search_results]", + "cannot specify [" + getName() + "] and [allow_partial_search_results]", validationException ); } + if (isScroll) { + validationException = addValidationError("cannot specify [" + getName() + "] and [scroll]", validationException); + } for (RetrieverSource innerRetriever : innerRetrievers) { - validationException = innerRetriever.retriever().validate(source, validationException, allowPartialSearchResults); + validationException = innerRetriever.retriever().validate(source, validationException, isScroll, allowPartialSearchResults); } return validationException; } @@ -216,22 +241,12 @@ protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) .size(rankWindowSize); + // apply the pre-filters downstream once if (preFilterQueryBuilders.isEmpty() == false) { retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); } retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, 
true); - // apply the pre-filters - if (preFilterQueryBuilders.size() > 0) { - QueryBuilder query = sourceBuilder.query(); - BoolQueryBuilder newQuery = new BoolQueryBuilder(); - if (query != null) { - newQuery.must(query); - } - preFilterQueryBuilders.forEach(newQuery::filter); - sourceBuilder.query(newQuery); - } - // Record the shard id in the sort result List> sortBuilders = sourceBuilder.sorts() != null ? new ArrayList<>(sourceBuilder.sorts()) : new ArrayList<>(); if (sortBuilders.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java index 882d44adb79c3..5e36ad0fd4fd6 100644 --- a/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/retriever/RetrieverBuilder.java @@ -239,6 +239,7 @@ public RetrieverBuilder rewrite(QueryRewriteContext ctx) throws IOException { public ActionRequestValidationException validate( SearchSourceBuilder source, ActionRequestValidationException validationException, + boolean isScroll, boolean allowPartialSearchResults ) { return validationException; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java index 1faeabb6acbf7..834bacd9e6a04 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -230,19 +229,9 @@ public void testResolveHiddenProperlyWithDateMath() { .metadata(buildMetadata(new Object[][] {}, indices)) .build(); String[] requestedIndex = new String[] { "" }; - Set resolvedIndices = resolver.resolveExpressions( - clusterState, - IndicesOptions.LENIENT_EXPAND_OPEN, - true, - requestedIndex - ); + Set resolvedIndices = resolver.resolveExpressions(clusterState, IndicesOptions.LENIENT_EXPAND_OPEN, true, requestedIndex); assertThat(resolvedIndices.size(), is(1)); - assertThat( - resolvedIndices, - contains( - oneOf(new ResolvedExpression("logs-pgsql-prod-" + todaySuffix), new ResolvedExpression("logs-pgsql-prod-" + tomorrowSuffix)) - ) - ); + assertThat(resolvedIndices, contains(oneOf("logs-pgsql-prod-" + todaySuffix, "logs-pgsql-prod-" + tomorrowSuffix))); } public void testSystemIndexAccess() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java index 74408b99e92ce..95446149f026b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/post/TransportSimulateIndexTemplateActionTests.java @@ -49,7 +49,9 @@ public void testSettingsProviderIsOverridden() throws Exception { 
matchingTemplate, ComposableIndexTemplate.builder() .indexPatterns(List.of("test_index*")) - .template(new Template(Settings.builder().put("test-setting", 1).build(), null, null)) + .template( + new Template(Settings.builder().put("test-setting", 1).put("test-setting-2", 2).build(), null, null) + ) .build() ) ) @@ -78,6 +80,24 @@ public Settings getAdditionalIndexSettings( ) { return Settings.builder().put("test-setting", 0).build(); } + }, new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("test-setting-2", 10).build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } }); Template resolvedTemplate = TransportSimulateIndexTemplateAction.resolveTemplate( @@ -92,5 +112,6 @@ public Settings getAdditionalIndexSettings( ); assertThat(resolvedTemplate.settings().getAsInt("test-setting", -1), is(1)); + assertThat(resolvedTemplate.settings().getAsInt("test-setting-2", -1), is(10)); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index c6ca97fd5694a..526961d74bf52 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -291,10 +291,23 @@ public void testValidate() throws IOException { assertNotNull(validationErrors); assertEquals(1, validationErrors.validationErrors().size()); assertEquals( - "cannot specify a compound retriever and [allow_partial_search_results]", + "cannot specify [test_compound_retriever_builder] and [allow_partial_search_results]", validationErrors.validationErrors().get(0) ); } + { + // scroll and compound retriever + SearchRequest searchRequest = createSearchRequest().source( + new SearchSourceBuilder().retriever(new TestCompoundRetrieverBuilder(randomIntBetween(1, 10))) + ); + searchRequest.allowPartialSearchResults(false); + searchRequest.scroll(TimeValue.timeValueMinutes(1)); + searchRequest.requestCache(false); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("cannot specify [test_compound_retriever_builder] and [scroll]", validationErrors.validationErrors().get(0)); + } { // allow_partial_results and non-compound retriever SearchRequest searchRequest = createSearchRequest().source(new SearchSourceBuilder().retriever(new RetrieverBuilder() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java new file mode 100644 index 0000000000000..38d4031755a55 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamFailureStoreDefinitionTests.java @@ -0,0 +1,73 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.cluster.metadata.DataStreamFailureStoreDefinition.INDEX_FAILURE_STORE_VERSION_SETTING_NAME; +import static org.hamcrest.Matchers.equalTo; + +public class DataStreamFailureStoreDefinitionTests extends ESTestCase { + + public void testSettingsFiltering() { + // Empty + Settings.Builder builder = Settings.builder(); + Settings.Builder expectedBuilder = Settings.builder(); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys(), equalTo(expectedBuilder.keys())); + + // All supported settings + builder.put(INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(DataTier.TIER_PREFERENCE, "data_cold") + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-10") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexMetadata.LIFECYCLE_NAME, "my-policy") + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)); + // We expect no changes + expectedBuilder = Settings.builder().put(builder.build()); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys(), equalTo(expectedBuilder.keys())); + + // Remove unsupported settings + String randomSetting = randomAlphaOfLength(10); + builder.put(INDEX_FAILURE_STORE_VERSION_SETTING_NAME, 3) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(DataTier.TIER_PREFERENCE, "data_cold") + .put(IndexMetadata.SETTING_INDEX_HIDDEN, true) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-10") + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexMetadata.LIFECYCLE_NAME, "my-policy") + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "." + randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "." 
+ randomAlphaOfLength(4), randomAlphaOfLength(4)) + .put(IndexSettings.MODE.getKey(), randomFrom(IndexMode.values())) + .put(randomSetting, randomAlphaOfLength(10)); + // We expect the two unsupported settings (index.mode and the random key) to be removed + expectedBuilder = Settings.builder().put(builder.build()); + assertThat( + DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().size(), + equalTo(expectedBuilder.keys().size() - 2) + ); + assertThat( + DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().contains(IndexSettings.MODE.getKey()), + equalTo(false) + ); + assertThat(DataStreamFailureStoreDefinition.filterUserDefinedSettings(builder).keys().contains(randomSetting), equalTo(false)); + } + +}
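The test above exercises what is, in effect, an allow-list over setting keys: supported keys survive DataStreamFailureStoreDefinition.filterUserDefinedSettings, while everything else (index.mode, random keys) is dropped. A minimal, hypothetical sketch of that idea — the key names below are illustrative and make no claim to match the production allow-list:

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for the failure-store settings filter: keep a setting
// only if its key is explicitly supported or starts with a supported prefix.
final class SettingsAllowListFilter {
    private static final Set<String> ALLOWED_KEYS = Set.of(
        "index.number_of_shards", "index.number_of_replicas", "index.lifecycle.name");
    private static final Set<String> ALLOWED_PREFIXES = Set.of(
        "index.routing.allocation.include.",
        "index.routing.allocation.exclude.",
        "index.routing.allocation.require.");

    static Map<String, String> filter(Map<String, String> settings) {
        Map<String, String> kept = new HashMap<>();
        settings.forEach((key, value) -> {
            if (ALLOWED_KEYS.contains(key) || ALLOWED_PREFIXES.stream().anyMatch(key::startsWith)) {
                kept.put(key, value); // unsupported keys such as index.mode are dropped
            }
        });
        return kept;
    }
}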
equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(1), equalTo(".watch_history-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(2), equalTo("logstash-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpressionWithWildcardAndExclusions() { - List indexExpressions = resolvedExpressions( + List indexExpressions = Arrays.asList( "<-before-inner-{now}>", "-", "", "<-after-inner-{now}>", "-" ); - List result = DateMathExpressionResolver.resolve(context, indexExpressions); + List result = DateMathExpressionResolver.resolve(context, indexExpressions); assertThat( - result.stream().map(ResolvedExpression::resource).toList(), + result, Matchers.contains( equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), equalTo("-"), // doesn't evaluate because it doesn't start with "<" and it is not an exclusion @@ -98,7 +98,7 @@ public void testExpressionWithWildcardAndExclusions() { ); result = DateMathExpressionResolver.resolve(noWildcardExpandContext, indexExpressions); assertThat( - result.stream().map(ResolvedExpression::resource).toList(), + result, Matchers.contains( equalTo("-before-inner-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime()))), // doesn't evaluate because it doesn't start with "<" and there can't be exclusions without wildcard expansion @@ -112,24 +112,21 @@ public void testExpressionWithWildcardAndExclusions() { } public void testEmpty() throws Exception { - List result = DateMathExpressionResolver.resolve(context, List.of()); + List result = DateMathExpressionResolver.resolve(context, Collections.emptyList()); assertThat(result.size(), equalTo(0)); } public void testExpression_Static() throws Exception { - List result = DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-test>")); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-test>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".marvel-test")); + assertThat(result.get(0), equalTo(".marvel-test")); } public void testExpression_MultiParts() throws Exception { - List result = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.text1-{now/d}-text2-{now/M}>") - ); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.text1-{now/d}-text2-{now/M}>")); assertThat(result.size(), equalTo(1)); assertThat( - result.get(0).resource(), + result.get(0), equalTo( ".text1-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())) @@ -140,42 +137,33 @@ public void testExpression_MultiParts() throws Exception { } public void testExpression_CustomFormat() throws Exception { - List results = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.marvel-{now/d{yyyy.MM.dd}}>") - ); + List results = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); assertThat(results.size(), equalTo(1)); - assertThat(results.get(0).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeStatic() throws Exception { - List result = DateMathExpressionResolver.resolve(context, resolvedExpressions("<.mar\\{v\\}el-{now/d}>")); + List result = 
DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(0), equalTo(".mar{v}el-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); } public void testExpression_EscapeDateFormat() throws Exception { - List result = DateMathExpressionResolver.resolve( - context, - resolvedExpressions("<.marvel-{now/d{'\\{year\\}'yyyy}}>") - ); + List result = DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); assertThat(result.size(), equalTo(1)); - assertThat(result.get(0).resource(), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); + assertThat(result.get(0), equalTo(".marvel-" + formatDate("'{year}'yyyy", dateFromMillis(context.getStartTime())))); } public void testExpression_MixedArray() throws Exception { - List result = DateMathExpressionResolver.resolve( + List result = DateMathExpressionResolver.resolve( context, - resolvedExpressions("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") + Arrays.asList("name1", "<.marvel-{now/d}>", "name2", "<.logstash-{now/M{uuuu.MM}}>") ); assertThat(result.size(), equalTo(4)); - assertThat(result.get(0).resource(), equalTo("name1")); - assertThat(result.get(1).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); - assertThat(result.get(2).resource(), equalTo("name2")); - assertThat( - result.get(3).resource(), - equalTo(".logstash-" + formatDate("uuuu.MM", dateFromMillis(context.getStartTime()).withDayOfMonth(1))) - ); + assertThat(result.get(0), equalTo("name1")); + assertThat(result.get(1), equalTo(".marvel-" + formatDate("uuuu.MM.dd", dateFromMillis(context.getStartTime())))); + assertThat(result.get(2), equalTo("name2")); + assertThat(result.get(3), equalTo(".logstash-" + formatDate("uuuu.MM", dateFromMillis(context.getStartTime()).withDayOfMonth(1)))); } public void testExpression_CustomTimeZoneInIndexName() throws Exception { @@ -214,19 +202,19 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { name -> false, name -> false ); - List results = DateMathExpressionResolver.resolve( + List results = DateMathExpressionResolver.resolve( context, - resolvedExpressions("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") + Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getId() + "}}>") ); assertThat(results.size(), equalTo(1)); logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); - assertThat(results.get(0).resource(), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); + assertThat(results.get(0), equalTo(".marvel-" + formatDate("uuuu.MM.dd", now.withZoneSameInstant(timeZone)))); } public void testExpressionInvalidUnescaped() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.mar}vel-{now/d}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.mar}vel-{now/d}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("invalid character at position [")); @@ -235,7 +223,7 @@ public void testExpressionInvalidUnescaped() throws Exception { public void 
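The date-math tests above all follow the <static-part{now/rounding{format|time-zone}}> shape. A small, self-contained illustration of how a name such as <.marvel-{now/d}> resolves, assuming UTC and the default uuuu.MM.dd pattern the tests use:

import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class DateMathSketch {
    public static void main(String[] args) {
        ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC);
        // "<.marvel-{now/d}>" -> static prefix ".marvel-" plus "now" rounded
        // down to the day, printed with the default "uuuu.MM.dd" pattern.
        String resolved = ".marvel-" + DateTimeFormatter.ofPattern("uuuu.MM.dd").format(now);
        System.out.println(resolved); // e.g. .marvel-2024.10.23
    }
}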
@@ -235,7 +223,7 @@ public void testExpressionInvalidDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d{}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); @@ -244,7 +232,7 @@ public void testExpressionInvalidEmptyDateMathFormat() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d{}}>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{}}>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("missing date format")); @@ -253,13 +241,10 @@ public void testExpressionInvalidOpenEnded() throws Exception { Exception e = expectThrows( ElasticsearchParseException.class, - () -> DateMathExpressionResolver.resolve(context, resolvedExpressions("<.marvel-{now/d>")) + () -> DateMathExpressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d>")) ); assertThat(e.getMessage(), containsString("invalid dynamic name expression")); assertThat(e.getMessage(), containsString("date math placeholder is open ended")); } - private List<ResolvedExpression> resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java index 1df3bf4132b60..1ca59ff402bd8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ExpressionListTests.java @@ -13,12 +13,10 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ExpressionList.Expression; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; -import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.function.Supplier; @@ -41,13 +39,10 @@ public void testEmpty() { public void testExplicitSingleNameExpression() { for (IndicesOptions indicesOptions : List.of(getExpandWildcardsIndicesOptions(), getNoExpandWildcardsIndicesOptions())) { for (String expressionString : List.of("non_wildcard", "-non_exclusion")) { - ExpressionList expressionList = new ExpressionList( - getContextWithOptions(indicesOptions), - resolvedExpressions(expressionString) - ); + ExpressionList expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); assertThat(expressionList.hasWildcard(), is(false)); if (randomBoolean()) { - expressionList = new ExpressionList(getContextWithOptions(indicesOptions), resolvedExpressions((expressionString))); + expressionList = new ExpressionList(getContextWithOptions(indicesOptions), List.of(expressionString)); } Iterator<Expression> expressionIterator = expressionList.iterator(); assertThat(expressionIterator.hasNext(), is(true)); @@ -67,14 +62,11 @@ public void testWildcardSingleExpression() { for (String wildcardTest : List.of("*", "a*", "*b", "a*b", "a-*b", "a*-b", "-*", "-a*", "-*b", "**", "*-*")) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), - resolvedExpressions(wildcardTest) + List.of(wildcardTest) ); assertThat(expressionList.hasWildcard(), is(true)); if (randomBoolean()) { - expressionList = new ExpressionList( - getContextWithOptions(getExpandWildcardsIndicesOptions()), - resolvedExpressions(wildcardTest) - ); + expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), List.of(wildcardTest)); } Iterator<Expression> expressionIterator = expressionList.iterator(); assertThat(expressionIterator.hasNext(), is(true)); @@ -90,13 +82,13 @@ } public void testWildcardLongerExpression() { - List<ResolvedExpression> onlyExplicits = randomList(7, () -> new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); - ResolvedExpression wildcard = new ResolvedExpression(randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**")); - List<ResolvedExpression> expressionList = new ArrayList<>(onlyExplicits.size() + 1); + List<String> onlyExplicits = randomList(7, () -> randomAlphaOfLengthBetween(0, 5)); + String wildcard = randomFrom("*", "*b", "-*", "*-", "c*", "a*b", "**"); + List<String> expressionList = new ArrayList<>(onlyExplicits.size() + 1); expressionList.addAll(randomSubsetOf(onlyExplicits)); int wildcardPos = expressionList.size(); expressionList.add(wildcard); - for (ResolvedExpression item : onlyExplicits) { + for (String item : onlyExplicits) { if (expressionList.contains(item) == false) { expressionList.add(item); } @@ -114,18 +106,18 @@ } else { assertThat(expression.isWildcard(), is(true)); } - assertThat(expression.get(), is(expressionList.get(i++).resource())); + assertThat(expression.get(), is(expressionList.get(i++))); } } public void testWildcardsNoExclusionExpressions() { - for (List<ResolvedExpression> wildcardExpression : List.of( - resolvedExpressions("*"), - resolvedExpressions("a", "*"), - resolvedExpressions("-b", "*c"), - resolvedExpressions("-", "a", "c*"), - resolvedExpressions("*", "a*", "*b"), - resolvedExpressions("-*", "a", "b*") + for (List<String> wildcardExpression : List.of( + List.of("*"), + List.of("a", "*"), + List.of("-b", "*c"), + List.of("-", "a", "c*"), + List.of("*", "a*", "*b"), + List.of("-*", "a", "b*") )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), @@ -138,25 +130,25 @@ int i = 0; for (Expression expression : expressionList) { assertThat(expression.isExclusion(), is(false)); - if (wildcardExpression.get(i).resource().contains("*")) { + if (wildcardExpression.get(i).contains("*")) { assertThat(expression.isWildcard(), is(true)); } else { assertThat(expression.isWildcard(), is(false)); } - assertThat(expression.get(), is(wildcardExpression.get(i++).resource())); + assertThat(expression.get(), is(wildcardExpression.get(i++))); } } } public void testWildcardExpressionNoExpandOptions() { - for (List<ResolvedExpression> wildcardExpression : List.of( - resolvedExpressions("*"), - resolvedExpressions("a", "*"), - resolvedExpressions("-b", "*c"), - resolvedExpressions("*d", "-"), - resolvedExpressions("*", "-*"), - resolvedExpressions("-", "a", "c*"), - resolvedExpressions("*", "a*", "*b") + for (List<String> wildcardExpression : List.of( + List.of("*"), + List.of("a", "*"), + List.of("-b", "*c"), + List.of("*d", "-"), + List.of("*", "-*"), + List.of("-", "a", "c*"), + List.of("*", "a*", "*b") )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getNoExpandWildcardsIndicesOptions()), @@ -170,7 +162,7 @@ for (Expression expression : expressionList) { assertThat(expression.isWildcard(), is(false)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(wildcardExpression.get(i++).resource())); + assertThat(expression.get(), is(wildcardExpression.get(i++))); } } } @@ -180,17 +172,17 @@ public void testSingleExclusionExpression() { int wildcardPos = randomIntBetween(0, 3); String exclusion = randomFrom("-*", "-", "-c*", "-ab", "--"); int exclusionPos = randomIntBetween(wildcardPos + 1, 7); - List<ResolvedExpression> exclusionExpression = new ArrayList<>(); + List<String> exclusionExpression = new ArrayList<>(); for (int i = 0; i < wildcardPos; i++) { - exclusionExpression.add(new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } - exclusionExpression.add(new ResolvedExpression(wildcard)); + exclusionExpression.add(wildcard); for (int i = wildcardPos + 1; i < exclusionPos; i++) { - exclusionExpression.add(new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } - exclusionExpression.add(new ResolvedExpression(exclusion)); + exclusionExpression.add(exclusion); for (int i = 0; i < randomIntBetween(0, 3); i++) { - exclusionExpression.add(new ResolvedExpression(randomAlphaOfLengthBetween(0, 5))); + exclusionExpression.add(randomAlphaOfLengthBetween(0, 5)); } ExpressionList expressionList = new ExpressionList(getContextWithOptions(getExpandWildcardsIndicesOptions()), exclusionExpression); if (randomBoolean()) { @@ -201,28 +193,28 @@ if (i == wildcardPos) { assertThat(expression.isWildcard(), is(true)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.get(i++))); } else if (i == exclusionPos) { assertThat(expression.isExclusion(), is(true)); - assertThat(expression.isWildcard(), is(exclusionExpression.get(i).resource().contains("*"))); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource().substring(1))); + assertThat(expression.isWildcard(), is(exclusionExpression.get(i).contains("*"))); + assertThat(expression.get(), is(exclusionExpression.get(i++).substring(1))); } else { assertThat(expression.isWildcard(), is(false)); assertThat(expression.isExclusion(), is(false)); - assertThat(expression.get(), is(exclusionExpression.get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.get(i++))); } } } public void testExclusionsExpression() { - for (Tuple<List<ResolvedExpression>, List<Boolean>> exclusionExpression : List.of( - new Tuple<>(resolvedExpressions("-a", "*", "-a"), List.of(false, false, true)), - new Tuple<>(resolvedExpressions("-b*", "c", "-a"), List.of(false, false, true)), - new Tuple<>(resolvedExpressions("*d", "-", "*b"), List.of(false, true, false)), - new Tuple<>(resolvedExpressions("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), - new Tuple<>(resolvedExpressions("*-", "-*", "a", "-b"), List.of(false, true, false, true)), - new Tuple<>(resolvedExpressions("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), - new Tuple<>(resolvedExpressions("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) + for (Tuple<List<String>, List<Boolean>> exclusionExpression : List.of( + new Tuple<>(List.of("-a", "*", "-a"), List.of(false, false, true)), + new Tuple<>(List.of("-b*", "c", "-a"), List.of(false, false, true)), + new Tuple<>(List.of("*d", "-", "*b"), List.of(false, true, false)), + new Tuple<>(List.of("-", "--", "-*", "", "-*"), List.of(false, false, false, false, true)), + new Tuple<>(List.of("*-", "-*", "a", "-b"), List.of(false, true, false, true)), + new Tuple<>(List.of("a", "-b", "-*", "-b", "*", "-b"), List.of(false, false, false, true, false, true)), + new Tuple<>(List.of("-a", "*d", "-a", "-*b", "-b", "--"), List.of(false, false, true, true, true, true)) )) { ExpressionList expressionList = new ExpressionList( getContextWithOptions(getExpandWildcardsIndicesOptions()), @@ -235,11 +227,11 @@ for (Expression expression : expressionList) { boolean isExclusion = exclusionExpression.v2().get(i); assertThat(expression.isExclusion(), is(isExclusion)); - assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).resource().contains("*"))); + assertThat(expression.isWildcard(), is(exclusionExpression.v1().get(i).contains("*"))); if (isExclusion) { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).resource().substring(1))); + assertThat(expression.get(), is(exclusionExpression.v1().get(i++).substring(1))); } else { - assertThat(expression.get(), is(exclusionExpression.v1().get(i++).resource())); + assertThat(expression.get(), is(exclusionExpression.v1().get(i++))); } } } @@ -314,8 +306,4 @@ private Context getContextWithOptions(IndicesOptions indicesOptions) { when(context.getOptions()).thenReturn(indicesOptions); return context; } - - private List<ResolvedExpression> resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } }
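The ExpressionList assertions above encode two rules: an item is a wildcard when it contains '*' (only under wildcard-expanding options), and an exclusion when it starts with '-' after some earlier wildcard has been seen. A compact re-statement of those rules as a sketch — illustrative only, not the production class:

import java.util.ArrayList;
import java.util.List;

// Mirrors the wildcard/exclusion expectations the tests assert.
record ClassifiedExpression(String value, boolean isWildcard, boolean isExclusion) {
    static List<ClassifiedExpression> classify(List<String> items, boolean expandWildcards) {
        List<ClassifiedExpression> out = new ArrayList<>(items.size());
        boolean wildcardSeen = false;
        for (String item : items) {
            // exclusions only kick in after a wildcard, and only when wildcards expand
            boolean exclusion = expandWildcards && wildcardSeen && item.startsWith("-");
            String bare = exclusion ? item.substring(1) : item;
            boolean wildcard = expandWildcards && bare.contains("*");
            wildcardSeen |= item.contains("*");
            out.add(new ClassifiedExpression(bare, wildcard, exclusion));
        }
        return out;
    }
}

For example, classify(List.of("*d", "-", "*b"), true) marks only the middle item as an exclusion, matching the (false, true, false) tuple in testExclusionsExpression.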
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index da19bd68e288a..d58de5ca65ea0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata.State; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -1581,27 +1580,16 @@ public void testResolveExpressions() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); + assertEquals(new HashSet<>(Arrays.asList("alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "alias-*")); assertEquals( - Set.of(new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")), - indexNameExpressionResolver.resolveExpressions(state, "alias-*") - ); - assertEquals( - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")), + new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "test-0", "alias-*") ); assertEquals( - Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("test-1"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ), + new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "test-*", "alias-*") ); - assertEquals( - Set.of(new ResolvedExpression("test-1"), new ResolvedExpression("alias-1")), - indexNameExpressionResolver.resolveExpressions(state, "*-1") - ); + assertEquals(new HashSet<>(Arrays.asList("test-1", "alias-1")), indexNameExpressionResolver.resolveExpressions(state, "*-1")); } @@ -1610,25 +1598,16 @@ public void testFilteringAliases() { .put(indexBuilder("test-1").state(State.OPEN).putAlias(AliasMetadata.builder("alias-1"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set<ResolvedExpression> resolvedExpressions = Set.of(new ResolvedExpression("alias-0"), new ResolvedExpression("alias-1")); + Set<String> resolvedExpressions = new HashSet<>(Arrays.asList("alias-0", "alias-1")); String[] strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertArrayEquals(new String[] { "alias-0" }, strings); // concrete index supersedes filtering alias - resolvedExpressions = Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "alias-0", "alias-1")); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); - resolvedExpressions = Set.of( - new ResolvedExpression("test-0"), - new ResolvedExpression("test-1"), - new ResolvedExpression("alias-0"), - new ResolvedExpression("alias-1") - ); + resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-1", "alias-0", "alias-1")); strings = indexNameExpressionResolver.filteringAliases(state, "test-0", resolvedExpressions); assertNull(strings); } @@ -1642,7 +1621,7 @@ public void testIndexAliases() { .putAlias(AliasMetadata.builder("test-alias-non-filtering")) ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "test-*"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "test-*"); String[] strings = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); Arrays.sort(strings); @@ -1677,28 +1656,28 @@ public void testIndexAliasesDataStreamAliases() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { // Only resolve aliases that refer to dataStreamName1 - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, true, resolvedExpressions); assertThat(result, arrayContainingInAnyOrder("logs_foo", "logs", "logs_bar")); } { // Only resolve aliases that refer to dataStreamName2 - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex2.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, true, resolvedExpressions); assertThat(result, arrayContainingInAnyOrder("logs_baz", "logs_baz2")); } { // Null is returned, because skipping identity check and resolvedExpressions contains the backing index name - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex2.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases(state, index, x -> true, x -> true, false, resolvedExpressions); assertThat(result, nullValue()); } { // Null is returned, because the wildcard expands to a list of aliases containing an unfiltered alias for dataStreamName1 - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "l*"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1712,7 +1691,7 @@ } { // Null is returned, because an unfiltered alias is targeting the same data stream - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "logs_bar", "logs"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, "logs_bar", "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1726,7 +1705,7 @@ } { // The filtered alias is returned because although we target the data stream name, skipIdentity is true - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1740,7 +1719,7 @@ } { // Null is returned because we target the data stream name and skipIdentity is false - Set<ResolvedExpression> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); + Set<String> resolvedExpressions = indexNameExpressionResolver.resolveExpressions(state, dataStreamName1, "logs"); String index = backingIndex1.getIndex().getName(); String[] result = indexNameExpressionResolver.indexAliases( state, @@ -1763,13 +1742,13 @@ public void testIndexAliasesSkipIdentity() { ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); - Set<ResolvedExpression> resolvedExpressions = Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-alias")); + Set<String> resolvedExpressions = new HashSet<>(Arrays.asList("test-0", "test-alias")); String[] aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, false, resolvedExpressions); assertNull(aliases); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); assertArrayEquals(new String[] { "test-alias" }, aliases); - resolvedExpressions = Collections.singleton(new ResolvedExpression("other-alias")); + resolvedExpressions = Collections.singleton("other-alias"); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, false, resolvedExpressions); assertArrayEquals(new String[] { "other-alias" }, aliases); aliases = indexNameExpressionResolver.indexAliases(state, "test-0", x -> true, x -> true, true, resolvedExpressions); @@ -1790,7 +1769,7 @@ public void testConcreteWriteIndexSuccessful() { x -> true, x -> true, true, - Set.of(new ResolvedExpression("test-0"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings);
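The mechanical change running through all of these test files is the revert of the ResolvedExpression wrapper: expression resolution goes back to returning plain Strings. Judging from the removed call sites (new ResolvedExpression(...) constructors and .resource() reads), the old wrapper was essentially a one-field record:

// Reconstructed shape of the reverted wrapper, for reference only.
record ResolvedExpression(String resource) {}

// Old call shape: resolveExpressions(state, "alias-*")
//   -> Set.of(new ResolvedExpression("alias-0"), ...), read back via resource()
// New call shape: resolveExpressions(state, "alias-*")
//   -> Set.of("alias-0", ...), compared directly as Strings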
ResolvedExpression("test-0"), new ResolvedExpression("test-1"), new ResolvedExpression("test-alias")) + new HashSet<>(Arrays.asList("test-0", "test-1", "test-alias")) ); Arrays.sort(strings); assertArrayEquals(new String[] { "test-alias" }, strings); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 05382de49087d..96a74d2e23aad 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -44,8 +44,10 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; @@ -74,6 +76,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -691,6 +694,178 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() { assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); } + public void testAggregateSettingsProviderOverrulesSettingsFromRequest() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("request_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesNullFromRequest() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + 
request.settings(Settings.builder().putNull("request_setting").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("request_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesSettingsFromTemplates() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().put("template_setting", "value1")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List combinedTemplateMappings + ) { + return Settings.builder().put("template_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + + public void testAggregateSettingsProviderOverrulesNullFromTemplates() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().putNull("template_setting")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata 
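These tests hinge on IndexSettingProvider.overrulesTemplateAndRequestSettings(): when it returns true, the provider's settings win over both template and request settings instead of acting as defaults underneath them. A minimal provider along the lines the tests use — the pinned setting name below is invented:

import java.time.Instant;
import java.util.List;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettingProvider;

// Hypothetical provider that pins a setting regardless of what the template
// or create-index request asked for.
public class PinnedSettingProvider implements IndexSettingProvider {
    @Override
    public Settings getAdditionalIndexSettings(
        String indexName,
        String dataStreamName,
        IndexMode templateIndexMode,
        Metadata metadata,
        Instant resolvedAt,
        Settings indexTemplateAndCreateRequestSettings,
        List<CompressedXContent> combinedTemplateMappings
    ) {
        return Settings.builder().put("index.pinned_setting", "enforced_value").build();
    }

    @Override
    public boolean overrulesTemplateAndRequestSettings() {
        return true; // without this override, provider settings lose to template/request values
    }
}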
+ + public void testAggregateSettingsProviderOverrulesNullFromTemplates() { + IndexTemplateMetadata templateMetadata = addMatchingTemplate(builder -> { + builder.settings(Settings.builder().putNull("template_setting")); + }); + Metadata metadata = new Metadata.Builder().templates(Map.of("template_1", templateMetadata)).build(); + ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); + request.settings(Settings.builder().put("request_setting", "value2").build()); + + Settings aggregatedIndexSettings = aggregateIndexSettings( + clusterState, + request, + templateMetadata.settings(), + null, + null, + Settings.EMPTY, + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + randomShardLimitService(), + Set.of(new IndexSettingProvider() { + @Override + public Settings getAdditionalIndexSettings( + String indexName, + String dataStreamName, + IndexMode templateIndexMode, + Metadata metadata, + Instant resolvedAt, + Settings indexTemplateAndCreateRequestSettings, + List<CompressedXContent> combinedTemplateMappings + ) { + return Settings.builder().put("template_setting", "overrule_value").put("other_setting", "other_value").build(); + } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return true; + } + }) + ); + + assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("overrule_value")); + assertThat(aggregatedIndexSettings.get("request_setting"), equalTo("value2")); + assertThat(aggregatedIndexSettings.get("other_setting"), equalTo("other_value")); + } + public void testInvalidAliasName() { final String[] invalidAliasNames = new String[] { "-alias1", "+alias2", "_alias3", "a#lias", "al:ias", ".", ".." }; String aliasName = randomFrom(invalidAliasNames); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index 25ed5fb2bdab2..982394ca31b1c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata.State; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; @@ -21,13 +20,13 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; -import java.util.Set; import java.util.function.Predicate; -import java.util.stream.Collectors; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createBackingIndex; import static org.elasticsearch.common.util.set.Sets.newHashSet; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -51,52 +50,50 @@ public void testConvertWildcardsJustIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX"))), - equalTo(resolvedExpressionsSet("testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testXXX"))), + equalTo(newHashSet("testXXX")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), + equalTo(newHashSet("testXXX", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "ku*"))), - equalTo(resolvedExpressionsSet("testXXX", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "ku*"))), + equalTo(newHashSet("testXXX", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*"))), -
equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*", "kuku"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY", "kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*", "-kuku"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("*", "-kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( newHashSet( IndexNameExpressionResolver.WildcardExpressionResolver.resolve( context, - resolvedExpressions("testX*", "-doe", "-testXXX", "-testYYY") + Arrays.asList("testX*", "-doe", "-testXXX", "-testYYY") ) ), - equalTo(resolvedExpressionsSet("testXYY")) + equalTo(newHashSet("testXYY")) ); if (indicesOptions == IndicesOptions.lenientExpandOpen()) { assertThat( - newHashSet( - IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "-testXXX")) - ), - equalTo(resolvedExpressionsSet("testXXX", "-testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), + equalTo(newHashSet("testXXX", "-testXXX")) ); } else if (indicesOptions == IndicesOptions.strictExpandOpen()) { IndexNotFoundException infe = expectThrows( @@ -106,8 +103,8 @@ public void testConvertWildcardsJustIndicesTests() { assertEquals("-testXXX", infe.getIndex().getName()); } assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testXXX", "-testX*"))), - equalTo(resolvedExpressionsSet("testXXX")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testXXX", "-testX*"))), + equalTo(newHashSet("testXXX")) ); } @@ -125,24 +122,24 @@ public void testConvertWildcardsTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testYY*", "alias*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYY*", "alias*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); 
assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("-kuku"))), - equalTo(resolvedExpressionsSet("-kuku")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("-kuku"))), + equalTo(newHashSet("-kuku")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*", "-testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("test*", "-testYYY"))), + equalTo(newHashSet("testXXX", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*", "testYYY"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testX*", "testYYY"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testYYY", "testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Arrays.asList("testYYY", "testX*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); } @@ -162,8 +159,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -171,8 +168,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXYY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -180,8 +177,8 @@ public void testConvertWildcardsOpenClosedIndicesTests() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("testX*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY")) ); context = new IndexNameExpressionResolver.Context( state, @@ -220,27 +217,28 @@ public void testMultipleWildcards() { SystemIndexAccessLevel.NONE ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*X*"))), - equalTo(resolvedExpressionsSet("testXXX", "testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY")) ); assertThat( - 
newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*X*Y"))), - equalTo(resolvedExpressionsSet("testXXY", "testXYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*X*Y"))), + equalTo(newHashSet("testXXY", "testXYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("kuku*Y*"))), - equalTo(resolvedExpressionsSet("kukuYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("kuku*Y*"))), + equalTo(newHashSet("kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*Y*"))), - equalTo(resolvedExpressionsSet("testXXY", "testXYY", "testYYY", "kukuYYY")) + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*"))), + equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY")) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("test*Y*X"))).size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("test*Y*X"))) + .size(), equalTo(0) ); assertThat( - newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, resolvedExpressions("*Y*X"))).size(), + newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, Collections.singletonList("*Y*X"))).size(), equalTo(0) ); } @@ -259,11 +257,11 @@ public void testAll() { ); assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); assertThat( newHashSet(IndexNameExpressionResolver.resolveExpressions(context, "_all")), - equalTo(resolvedExpressionsSet("testXXX", "testXYY", "testYYY")) + equalTo(newHashSet("testXXX", "testXYY", "testYYY")) ); IndicesOptions noExpandOptions = IndicesOptions.fromOptions( randomBoolean(), @@ -300,7 +298,7 @@ public void testAllAliases() { IndicesOptions.lenientExpandOpen(), // don't include hidden SystemIndexAccessLevel.NONE ); - assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(Set.of())); + assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(newHashSet())); } { @@ -321,7 +319,7 @@ public void testAllAliases() { ); assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet("index-visible-alias")) + equalTo(newHashSet("index-visible-alias")) ); } } @@ -364,7 +362,7 @@ public void testAllDataStreams() { assertThat( newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), - equalTo(resolvedExpressionsSet(DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis))) + equalTo(newHashSet(DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis))) ); } @@ -387,7 +385,7 @@ public void testAllDataStreams() { NONE ); - assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(Set.of())); + assertThat(newHashSet(IndexNameExpressionResolver.WildcardExpressionResolver.resolveAll(context)), equalTo(newHashSet())); } } @@ -508,16 +506,16 @@ 
public void testResolveAliases() { ); { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_index", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_index", "bar_index")); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ); assertEquals(0, indices.size()); } { IndexNotFoundException infe = expectThrows( IndexNotFoundException.class, () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesStrictContext, - resolvedExpressions("foo_a*") + Collections.singletonList("foo_a*") ) ); assertEquals("foo_a*", infe.getIndex().getName()); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesStrictContext, - resolvedExpressions("foo*") + Collections.singletonList("foo*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); + assertThat(indices, containsInAnyOrder("foo_foo", "foo_index")); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_alias") + Collections.singletonList("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); } { - Collection<ResolvedExpression> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection<String> indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( skipAliasesLenientContext, - resolvedExpressions("foo_alias") + Collections.singletonList("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); }
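Note the three outcomes for the same pattern in the blocks above: matches under the permissive context, an empty collection under the lenient alias-skipping context, and IndexNotFoundException under the strict one. A stripped-down sketch of that strict-versus-lenient branch (illustrative; the real resolver works on cluster metadata, not a plain set, and does proper glob matching):

import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

// Hypothetical resolver: strict mode turns "wildcard matched nothing" into an
// error, lenient mode returns an empty list.
final class GlobResolver {
    static List<String> resolve(Set<String> names, String glob, boolean strict) {
        // naive glob -> regex translation; assumes names contain no regex metacharacters
        Pattern pattern = Pattern.compile(glob.replace("*", ".*"));
        List<String> matches = names.stream().filter(n -> pattern.matcher(n).matches()).collect(Collectors.toList());
        if (strict && matches.isEmpty() && glob.contains("*")) {
            throw new IllegalStateException("no such index [" + glob + "]"); // stands in for IndexNotFoundException
        }
        return matches;
    }
}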
IndexNameExpressionResolver.WildcardExpressionResolver.resolve( noExpandNoAliasesContext, - resolvedExpressions("foo_alias") + List.of("foo_alias") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_alias"))); + assertThat(indices, containsInAnyOrder("foo_alias")); } IndicesOptions strictNoExpandNoAliasesIndicesOptions = IndicesOptions.fromOptions( false, @@ -656,18 +654,18 @@ public void testResolveDataStreams() { ); // data streams are not included but expression matches the data stream - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("foo_index", "foo_foo", "bar_index"))); + assertThat(indices, containsInAnyOrder("foo_index", "foo_foo", "bar_index")); // data streams are not included and expression doesn't match the data steram indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAndAliasesContext, - resolvedExpressions("bar_*") + Collections.singletonList("bar_*") ); - assertThat(newHashSet(indices), equalTo(resolvedExpressionsSet("bar_bar", "bar_index"))); + assertThat(indices, containsInAnyOrder("bar_bar", "bar_index")); } { @@ -693,39 +691,35 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesAndDataStreamsContext, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); // include all wildcard adds the data stream's backing indices indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesAndDataStreamsContext, - resolvedExpressions("*") + Collections.singletonList("*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - "bar_bar", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + "bar_bar", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); } @@ -754,39 +748,35 @@ public void testResolveDataStreams() { ); // data stream's corresponding backing indices are resolved - Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( + Collection indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesDataStreamsAndHiddenIndices, - resolvedExpressions("foo_*") + Collections.singletonList("foo_*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - 
DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); // include all wildcard adds the data stream's backing indices indices = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( indicesAliasesDataStreamsAndHiddenIndices, - resolvedExpressions("*") + Collections.singletonList("*") ); assertThat( - newHashSet(indices), - equalTo( - resolvedExpressionsSet( - "foo_index", - "bar_index", - "foo_foo", - "bar_bar", - DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), - DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) - ) + indices, + containsInAnyOrder( + "foo_index", + "bar_index", + "foo_foo", + "bar_bar", + DataStream.getDefaultBackingIndexName("foo_logs", 1, epochMillis), + DataStream.getDefaultBackingIndexName("foo_logs", 2, epochMillis) ) ); } @@ -818,28 +808,16 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { SystemIndexAccessLevel.NONE ); - Collection matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("bar_bar", "foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of(new ResolvedExpression("*"))); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("bar_bar", "foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("foo*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_foo", "foo_index", "bar_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - onlyIndicesContext, - List.of(new ResolvedExpression("foo*")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_foo", "foo_index"))); - matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - indicesAndAliasesContext, - List.of(new ResolvedExpression("foo_alias")) - ); - assertThat(newHashSet(matches), equalTo(resolvedExpressionsSet("foo_alias"))); + Collection matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("*")); + assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("*")); + assertThat(matches, containsInAnyOrder("bar_bar", "foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo*")); + assertThat(matches, containsInAnyOrder("foo_foo", "foo_index", "bar_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(onlyIndicesContext, List.of("foo*")); + assertThat(matches, containsInAnyOrder("foo_foo", "foo_index")); + matches = IndexNameExpressionResolver.WildcardExpressionResolver.resolve(indicesAndAliasesContext, List.of("foo_alias")); + assertThat(matches, containsInAnyOrder("foo_alias")); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, () -> 
IndexNameExpressionResolver.resolveExpressions(onlyIndicesContext, "foo_alias") @@ -862,19 +840,8 @@ private static IndexMetadata.Builder indexBuilder(String index) { private static void assertWildcardResolvesToEmpty(IndexNameExpressionResolver.Context context, String wildcardExpression) { IndexNotFoundException infe = expectThrows( IndexNotFoundException.class, - () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve( - context, - List.of(new ResolvedExpression(wildcardExpression)) - ) + () -> IndexNameExpressionResolver.WildcardExpressionResolver.resolve(context, List.of(wildcardExpression)) ); assertEquals(wildcardExpression, infe.getIndex().getName()); } - - private List resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).toList(); - } - - private Set resolvedExpressionsSet(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).collect(Collectors.toSet()); - } } diff --git a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java index 8bfd4c7c5ac68..22308e15f4845 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeMetadataTests.java @@ -167,10 +167,6 @@ public void testUpgradeMarksPreviousVersion() { assertThat(nodeMetadata.previousNodeVersion(), equalTo(buildVersion)); } - public static Version tooNewVersion() { - return Version.fromId(between(Version.CURRENT.id + 1, 99999999)); - } - public static IndexVersion tooNewIndexVersion() { return IndexVersion.fromId(between(IndexVersion.current().id() + 1, 99999999)); } @@ -179,10 +175,6 @@ public static BuildVersion tooNewBuildVersion() { return BuildVersion.fromVersionId(between(Version.CURRENT.id() + 1, 99999999)); } - public static Version tooOldVersion() { - return Version.fromId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); - } - public static BuildVersion tooOldBuildVersion() { return BuildVersion.fromVersionId(between(1, Version.CURRENT.minimumCompatibilityVersion().id - 1)); } diff --git a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java index bf3fc1697aa44..c7614e2d98eed 100644 --- a/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java +++ b/server/src/test/java/org/elasticsearch/env/OverrideNodeVersionCommandTests.java @@ -96,7 +96,9 @@ public void testFailsOnEmptyPath() { } public void testFailsIfUnnecessary() throws IOException { - final Version nodeVersion = Version.fromId(between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id)); + final BuildVersion nodeVersion = BuildVersion.fromVersionId( + between(Version.CURRENT.minimumCompatibilityVersion().id, Version.CURRENT.id) + ); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); final ElasticsearchException elasticsearchException = expectThrows( @@ -107,7 +109,7 @@ public void testFailsIfUnnecessary() throws IOException { elasticsearchException.getMessage(), allOf( containsString("compatible with current version"), - containsString(Version.CURRENT.toString()), + containsString(BuildVersion.current().toString()), containsString(nodeVersion.toString()) ) ); @@ -115,7 +117,7 @@ public void testFailsIfUnnecessary() throws IOException { } public void testWarnsIfTooOld() 
throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput("n"); @@ -137,11 +139,11 @@ public void testWarnsIfTooOld() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testWarnsIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("yy", "Yy", "n", "yes", "true", "N", "no")); @@ -162,11 +164,11 @@ public void testWarnsIfTooNew() throws Exception { expectThrows(IllegalStateException.class, () -> mockTerminal.readText("")); final NodeMetadata nodeMetadata = PersistedClusterStateService.nodeMetadata(dataPaths); - assertThat(nodeMetadata.nodeVersion().toVersion(), equalTo(nodeVersion)); + assertThat(nodeMetadata.nodeVersion(), equalTo(nodeVersion)); } public void testOverwritesIfTooOld() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooOldVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooOldBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); @@ -189,7 +191,7 @@ public void testOverwritesIfTooOld() throws Exception { } public void testOverwritesIfTooNew() throws Exception { - final Version nodeVersion = NodeMetadataTests.tooNewVersion(); + final BuildVersion nodeVersion = NodeMetadataTests.tooNewBuildVersion(); PersistedClusterStateService.overrideVersion(nodeVersion, dataPaths); final MockTerminal mockTerminal = MockTerminal.create(); mockTerminal.addTextInput(randomFrom("y", "Y")); diff --git a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java index 450d123f551c8..4428a7e078510 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PersistedClusterStateServiceTests.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.env.BuildVersion; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -1414,14 +1415,17 @@ public void testOverrideLuceneVersion() throws IOException { assertThat(clusterState.metadata().version(), equalTo(version)); } + @UpdateForV9(owner = UpdateForV9.Owner.SEARCH_FOUNDATIONS) + BuildVersion overrideVersion = BuildVersion.fromVersionId(Version.V_8_0_0.id); + NodeMetadata prevMetadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); assertEquals(BuildVersion.current(), prevMetadata.nodeVersion()); - 
PersistedClusterStateService.overrideVersion(Version.V_8_0_0, persistedClusterStateService.getDataPaths()); + PersistedClusterStateService.overrideVersion(overrideVersion, persistedClusterStateService.getDataPaths()); NodeMetadata metadata = PersistedClusterStateService.nodeMetadata(persistedClusterStateService.getDataPaths()); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), metadata.nodeVersion()); + assertEquals(overrideVersion, metadata.nodeVersion()); for (Path p : persistedClusterStateService.getDataPaths()) { NodeMetadata individualMetadata = PersistedClusterStateService.nodeMetadata(p); - assertEquals(BuildVersion.fromVersionId(Version.V_8_0_0.id()), individualMetadata.nodeVersion()); + assertEquals(overrideVersion, individualMetadata.nodeVersion()); } } } diff --git a/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java b/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java index 628de0b047bf5..adac8bf204f3e 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexSettingProviderTests.java @@ -23,15 +23,24 @@ public class IndexSettingProviderTests extends ESSingleNodeTestCase { public void testIndexCreation() throws Exception { - var indexService = createIndex("my-index1"); + Settings settings = Settings.builder().put("index.mapping.depth.limit", 10).build(); + var indexService = createIndex("my-index1", settings); assertFalse(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("10", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); INDEX_SETTING_PROVIDER1_ENABLED.set(true); - indexService = createIndex("my-index2"); + indexService = createIndex("my-index2", settings); assertTrue(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("10", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); + INDEX_SETTING_OVERRULING.set(true); + indexService = createIndex("my-index3", settings); + assertTrue(indexService.getIndexSettings().getSettings().hasValue("index.refresh_interval")); + assertEquals("100", indexService.getIndexSettings().getSettings().get("index.mapping.depth.limit")); + + INDEX_SETTING_DEPTH_ENABLED.set(false); INDEX_SETTING_PROVIDER2_ENABLED.set(true); - var e = expectThrows(IllegalArgumentException.class, () -> createIndex("my-index3")); + var e = expectThrows(IllegalArgumentException.class, () -> createIndex("my-index4", settings)); assertEquals( "additional index setting [index.refresh_interval] added by [TestIndexSettingsProvider] is already present", e.getMessage() @@ -47,7 +56,7 @@ public static class Plugin1 extends Plugin { @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { - return List.of(new TestIndexSettingsProvider("index.refresh_interval", "-1", INDEX_SETTING_PROVIDER1_ENABLED)); + return List.of(new TestIndexSettingsProvider("-1", INDEX_SETTING_PROVIDER1_ENABLED)); } } @@ -56,22 +65,22 @@ public static class Plugin2 extends Plugin { @Override public Collection getAdditionalIndexSettingProviders(IndexSettingProvider.Parameters parameters) { - return List.of(new TestIndexSettingsProvider("index.refresh_interval", "100s", INDEX_SETTING_PROVIDER2_ENABLED)); + return List.of(new TestIndexSettingsProvider("100s", INDEX_SETTING_PROVIDER2_ENABLED)); } } private static final AtomicBoolean 
INDEX_SETTING_PROVIDER1_ENABLED = new AtomicBoolean(false); private static final AtomicBoolean INDEX_SETTING_PROVIDER2_ENABLED = new AtomicBoolean(false); + private static final AtomicBoolean INDEX_SETTING_DEPTH_ENABLED = new AtomicBoolean(true); + private static final AtomicBoolean INDEX_SETTING_OVERRULING = new AtomicBoolean(false); static class TestIndexSettingsProvider implements IndexSettingProvider { - private final String settingName; - private final String settingValue; + private final String intervalValue; private final AtomicBoolean enabled; - TestIndexSettingsProvider(String settingName, String settingValue, AtomicBoolean enabled) { - this.settingName = settingName; - this.settingValue = settingValue; + TestIndexSettingsProvider(String intervalValue, AtomicBoolean enabled) { + this.intervalValue = intervalValue; this.enabled = enabled; } @@ -86,10 +95,19 @@ public Settings getAdditionalIndexSettings( List combinedTemplateMappings ) { if (enabled.get()) { - return Settings.builder().put(settingName, settingValue).build(); + var builder = Settings.builder().put("index.refresh_interval", intervalValue); + if (INDEX_SETTING_DEPTH_ENABLED.get()) { + builder.put("index.mapping.depth.limit", 100); + } + return builder.build(); } else { return Settings.EMPTY; } } + + @Override + public boolean overrulesTemplateAndRequestSettings() { + return INDEX_SETTING_OVERRULING.get(); + } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java index 286be8d12570d..357ada3ad656d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BlockSourceReaderTests.java @@ -51,7 +51,7 @@ public void testEmptyArray() throws IOException { private void loadBlock(LeafReaderContext ctx, Consumer test) throws IOException { ValueFetcher valueFetcher = SourceValueFetcher.toString(Set.of("field")); BlockSourceReader.LeafIteratorLookup lookup = BlockSourceReader.lookupFromNorms("field"); - BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup, null); + BlockLoader loader = new BlockSourceReader.BytesRefsBlockLoader(valueFetcher, lookup); assertThat(loader.columnAtATimeReader(ctx), nullValue()); BlockLoader.RowStrideReader reader = loader.rowStrideReader(ctx); assertThat(loader.rowStrideStoredFieldSpec(), equalTo(StoredFieldsSpec.NEEDS_SOURCE)); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java index 934744ef3ef96..7a4ce8bcb03fa 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoredSourceFieldMapperTests.java @@ -962,6 +962,94 @@ public void testArrayWithNestedObjects() throws IOException { {"path":{"to":[{"id":[1,20,3]},{"id":10},{"id":0}]}}""", syntheticSource); } + public void testObjectArrayWithinNestedObjects() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + 
b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startObject("to"); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":{"obj":[{"id":[1,20,3]},{"id":10}]}}}""", syntheticSource); + } + + public void testObjectArrayWithinNestedObjectsArray() throws IOException { + DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { + b.startObject("path").startObject("properties"); + { + b.startObject("to").field("type", "nested").startObject("properties"); + { + b.startObject("obj").startObject("properties"); + { + b.startObject("id").field("type", "integer").field("synthetic_source_keep", "arrays").endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + } + b.endObject().endObject(); + })).documentMapper(); + + var syntheticSource = syntheticSource(documentMapper, b -> { + b.startObject("path"); + { + b.startArray("to"); + { + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 1, 20, 3).endObject(); + b.startObject().field("id", 10).endObject(); + } + b.endArray(); + } + b.endObject(); + b.startObject(); + { + b.startArray("obj"); + { + b.startObject().array("id", 200, 300, 500).endObject(); + b.startObject().field("id", 100).endObject(); + } + b.endArray(); + } + b.endObject(); + } + b.endArray(); + } + b.endObject(); + }); + assertEquals(""" + {"path":{"to":[{"obj":[{"id":[1,20,3]},{"id":10}]},{"obj":[{"id":[200,300,500]},{"id":100}]}]}}""", syntheticSource); + } + public void testArrayWithinArray() throws IOException { DocumentMapper documentMapper = createMapperService(syntheticSourceMapping(b -> { b.startObject("path"); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 86914cfe9ced7..c2375e948fda0 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -1353,6 +1353,7 @@ private void testBlockLoaderFromParent(boolean columnReader, boolean syntheticSo }; MapperService mapper = createMapperService(syntheticSource ? syntheticSourceMapping(buildFields) : mapping(buildFields)); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field.sub"); - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java index 38e6ca0be0647..b0a14515f2fbc 100644 --- a/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/FsDirectoryFactoryTests.java @@ -115,8 +115,6 @@ private void doTestPreload(String... 
preload) throws IOException { var func = fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.ALL_FILES); assertNotEquals(fsDirectoryFactory.preLoadFuncMap.get(mmapDirectory), MMapDirectory.NO_FILES); - assertTrue(func.test("foo.dvd", newIOContext(random()))); - assertTrue(func.test("foo.tmp", newIOContext(random()))); for (String ext : preload) { assertTrue("ext: " + ext, func.test("foo." + ext, newIOContext(random()))); } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index 17975b7d18dd8..36f7355a541c1 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -78,7 +77,6 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.stream.Collectors; import java.util.stream.Stream; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; @@ -679,27 +677,27 @@ public void testBuildAliasFilter() { ); ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-0")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-0")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-0")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-0")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-1")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "baz"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-1")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bax"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-0", resolvedExpressions("test-alias-0", "test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-0", Set.of("test-alias-0", "test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0", 
"test-alias-1")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -708,7 +706,7 @@ public void testBuildAliasFilter() { assertThat(filter.should(), containsInAnyOrder(QueryBuilders.termQuery("foo", "baz"), QueryBuilders.termQuery("foo", "bar"))); } { - AliasFilter result = indicesService.buildAliasFilter(state, "test-1", resolvedExpressions("test-alias-0", "test-alias-1")); + AliasFilter result = indicesService.buildAliasFilter(state, "test-1", Set.of("test-alias-0", "test-alias-1")); assertThat(result.getAliases(), arrayContainingInAnyOrder("test-alias-0", "test-alias-1")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -720,7 +718,7 @@ public void testBuildAliasFilter() { AliasFilter result = indicesService.buildAliasFilter( state, "test-0", - resolvedExpressions("test-alias-0", "test-alias-1", "test-alias-non-filtering") + Set.of("test-alias-0", "test-alias-1", "test-alias-non-filtering") ); assertThat(result.getAliases(), emptyArray()); assertThat(result.getQueryBuilder(), nullValue()); @@ -729,7 +727,7 @@ public void testBuildAliasFilter() { AliasFilter result = indicesService.buildAliasFilter( state, "test-1", - resolvedExpressions("test-alias-0", "test-alias-1", "test-alias-non-filtering") + Set.of("test-alias-0", "test-alias-1", "test-alias-non-filtering") ); assertThat(result.getAliases(), emptyArray()); assertThat(result.getQueryBuilder(), nullValue()); @@ -756,19 +754,19 @@ public void testBuildAliasFilterDataStreamAliases() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metadata(mdBuilder).build(); { String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "bar"))); } { String index = backingIndex2.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo")); assertThat(result.getQueryBuilder(), equalTo(QueryBuilders.termQuery("foo", "baz"))); } { String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo", "logs")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -778,7 +776,7 @@ public void testBuildAliasFilterDataStreamAliases() { } { String index = backingIndex2.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs")); assertThat(result.getAliases(), arrayContainingInAnyOrder("logs_foo", "logs")); BoolQueryBuilder filter = (BoolQueryBuilder) result.getQueryBuilder(); assertThat(filter.filter(), empty()); @@ -789,13 +787,13 @@ public void testBuildAliasFilterDataStreamAliases() { 
{ // querying an unfiltered and a filtered alias for the same data stream should drop the filters String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs_foo", "logs", "logs_bar")); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs_foo", "logs", "logs_bar")); assertThat(result, is(AliasFilter.EMPTY)); } { // similarly, querying the data stream name and a filtered alias should drop the filter String index = backingIndex1.getIndex().getName(); - AliasFilter result = indicesService.buildAliasFilter(state, index, resolvedExpressions("logs", dataStreamName1)); + AliasFilter result = indicesService.buildAliasFilter(state, index, Set.of("logs", dataStreamName1)); assertThat(result, is(AliasFilter.EMPTY)); } } @@ -848,8 +846,4 @@ public void testWithTempIndexServiceHandlesExistingIndex() throws Exception { return null; }); } - - private Set<ResolvedExpression> resolvedExpressions(String... expressions) { - return Arrays.stream(expressions).map(ResolvedExpression::new).collect(Collectors.toSet()); - } } diff --git a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java index 8ee2754427dda..c0657b5888ad2 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/service/FileSettingsServiceTests.java @@ -250,7 +250,7 @@ public void testProcessFileChanges() throws Exception { fileSettingsService.start(); fileSettingsService.clusterChanged(new ClusterChangedEvent("test", clusterService.state(), ClusterState.EMPTY_STATE)); // second file change; contents still don't matter - writeTestFile(fileSettingsService.watchedFile(), "{}"); + overwriteTestFile(fileSettingsService.watchedFile(), "{}"); // wait for listener to be called (once for initial processing, once for subsequent update) assertTrue(latch.await(20, TimeUnit.SECONDS)); @@ -355,6 +355,12 @@ public void testHandleSnapshotRestoreResetsMetadata() throws Exception { private void writeTestFile(Path path, String contents) throws IOException { Path tempFilePath = createTempFile(); Files.writeString(tempFilePath, contents); - Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE); + Files.move(tempFilePath, path, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); + } + + private void overwriteTestFile(Path path, String contents) throws IOException { + Path tempFilePath = createTempFile(); + Files.writeString(tempFilePath, contents); + Files.move(tempFilePath, path, StandardCopyOption.REPLACE_EXISTING); + } }
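A note on the two move flavours above (JDK semantics, not Elasticsearch-specific): Files.move with ATOMIC_MOVE performs a single indivisible rename, and adding REPLACE_EXISTING lets it succeed when the target file already exists, while the new overwriteTestFile deliberately drops ATOMIC_MOVE so the second write lands as a plain replacement, presumably to exercise the watcher's handling of an in-place overwrite. A minimal JDK-only sketch of the distinction; paths and contents are illustrative:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    public class MoveSemanticsDemo {
        public static void main(String[] args) throws IOException {
            Path dir = Files.createTempDirectory("move-demo");
            Path target = dir.resolve("settings.json");

            // Initial write: stage contents in a temp file, then swap it into place
            // atomically; REPLACE_EXISTING also covers an already-present target.
            Path tmp1 = Files.createTempFile(dir, "stage", ".json");
            Files.writeString(tmp1, "{}");
            Files.move(tmp1, target, StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);

            // Overwrite: a plain, not-necessarily-atomic replacement of the existing file.
            Path tmp2 = Files.createTempFile(dir, "stage", ".json");
            Files.writeString(tmp2, "{\"updated\":true}");
            Files.move(tmp2, target, StandardCopyOption.REPLACE_EXISTING);

            System.out.println(Files.readString(target)); // {"updated":true}
        }
    }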
diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java index 0d05c3d0cd77b..502ffdde62e5a 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/KeywordFieldSyntheticSourceSupport.java @@ -37,6 +37,11 @@ public class KeywordFieldSyntheticSourceSupport implements MapperTestCase.Synthe this.docValues = useFallbackSyntheticSource == false || ESTestCase.randomBoolean(); } + @Override + public boolean ignoreAbove() { + return ignoreAbove != null; + } + @Override public boolean preservesExactSource() { // We opt in to the fallback synthetic source implementation diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java index 8bc2666bcfe3b..da04f30ff8023 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperServiceTestCase.java @@ -179,6 +179,11 @@ public final MapperService createMapperService(XContentBuilder mappings) throws return createMapperService(getVersion(), mappings); } + public final MapperService createSyntheticSourceMapperService(XContentBuilder mappings) throws IOException { + var settings = Settings.builder().put("index.mapping.source.mode", "synthetic").build(); + return createMapperService(getVersion(), settings, () -> true, mappings); + } + protected IndexVersion getVersion() { return IndexVersion.current(); } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index 7669ada750c14..c89c0b2e37dd2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -49,6 +49,7 @@ import org.elasticsearch.script.ScriptFactory; import org.elasticsearch.script.field.DocValuesScriptFieldFactory; import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.fetch.StoredFieldsSpec; import org.elasticsearch.search.lookup.LeafStoredFieldsLookup; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.lookup.Source; @@ -1103,6 +1104,10 @@ default boolean preservesExactSource() { return false; } + default boolean ignoreAbove() { + return false; + } + /** * Examples that should work when source is generated from doc values. */ @@ -1321,15 +1326,12 @@ private BlockLoader getBlockLoader(boolean columnReader) { return mapper.fieldType(loaderFieldName).blockLoader(new MappedFieldType.BlockLoaderContext() { @Override public String indexName() { - return "test_index"; + return mapper.getIndexSettings().getIndex().getName(); } @Override public IndexSettings indexSettings() { - var imd = IndexMetadata.builder(indexName()) - .settings(MapperTestCase.indexSettings(IndexVersion.current(), 1, 1).put(Settings.EMPTY)) - .build(); - return new IndexSettings(imd, Settings.EMPTY); + return mapper.getIndexSettings(); } @Override @@ -1362,9 +1364,19 @@ public FieldNamesFieldMapper.FieldNamesFieldType fieldNames() { private void testBlockLoader(boolean syntheticSource, boolean columnReader) throws IOException { // TODO if we're not using synthetic source use a different sort of example. Or something. - SyntheticSourceExample example = syntheticSourceSupport(false, columnReader).example(5); + var syntheticSourceSupport = syntheticSourceSupport(false, columnReader); + SyntheticSourceExample example = syntheticSourceSupport.example(5); + if (syntheticSource && columnReader == false) { + // The synthetic source testing support can't always handle the difference between stored and synthetic source mode. + // With ignore_above, the ignored values are always appended after the valid values + // (whether the field has doc values or a stored field), while stored source just reads the original values (from _source), + // where there is no notion of ignored values. + // TODO: fix this by improving block loader support: https://github.com/elastic/elasticsearch/issues/115257 + assumeTrue("inconsistent synthetic source testing support with ignore above", syntheticSourceSupport.ignoreAbove() == false); + } + // TODO: only rely on the index.mapping.source.mode setting XContentBuilder mapping = syntheticSource ? syntheticSourceFieldMapping(example.mapping) : fieldMapping(example.mapping); - MapperService mapper = createMapperService(mapping); + MapperService mapper = syntheticSource ? createSyntheticSourceMapperService(mapping) : createMapperService(mapping); BlockReaderSupport blockReaderSupport = getSupportedReaders(mapper, "field"); if (syntheticSource) { // geo_point and point do not yet support synthetic source assumeTrue( blockReaderSupport.syntheticSource ); } - testBlockLoader(columnReader, example, blockReaderSupport); + var sourceLoader = mapper.mappingLookup().newSourceLoader(SourceFieldMetrics.NOOP); + testBlockLoader(columnReader, example, blockReaderSupport, sourceLoader); } - protected final void testBlockLoader(boolean columnReader, SyntheticSourceExample example, BlockReaderSupport blockReaderSupport) - throws IOException { + protected final void testBlockLoader( + boolean columnReader, + SyntheticSourceExample example, + BlockReaderSupport blockReaderSupport, + SourceLoader sourceLoader + ) throws IOException { BlockLoader loader = blockReaderSupport.getBlockLoader(columnReader); Function<Object, Object> valuesConvert = loadBlockExpected(blockReaderSupport, columnReader); if (valuesConvert == null) { @@ -1404,9 +1421,15 @@ protected final void testBlockLoader(boolean columnReader, SyntheticSourceExampl return; } } else { + StoredFieldsSpec storedFieldsSpec = loader.rowStrideStoredFieldSpec(); + if (storedFieldsSpec.requiresSource()) { + storedFieldsSpec = storedFieldsSpec.merge( + new StoredFieldsSpec(true, storedFieldsSpec.requiresMetadata(), sourceLoader.requiredStoredFields()) + ); + } BlockLoaderStoredFieldsFromLeafLoader storedFieldsLoader = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(loader.rowStrideStoredFieldSpec()).getLoader(ctx, null), - loader.rowStrideStoredFieldSpec().requiresSource() ? SourceLoader.FROM_STORED_SOURCE.leaf(ctx.reader(), null) : null + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + storedFieldsSpec.requiresSource() ? 
sourceLoader.leaf(ctx.reader(), null) : null ); storedFieldsLoader.advanceTo(0); BlockLoader.Builder builder = loader.builder(TestBlock.factory(ctx.reader().numDocs()), 1); diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java index b6a031c9ff906..97ded7f9a06f2 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/TextFieldFamilySyntheticSourceTestSetup.java @@ -51,24 +51,9 @@ public static MapperTestCase.BlockReaderSupport getSupportedReaders(MapperServic } public static Function loadBlockExpected(MapperTestCase.BlockReaderSupport blockReaderSupport, boolean columnReader) { - if (nullLoaderExpected(blockReaderSupport.mapper(), blockReaderSupport.loaderFieldName())) { - return null; - } return v -> ((BytesRef) v).utf8ToString(); } - private static boolean nullLoaderExpected(MapperService mapper, String fieldName) { - MappedFieldType type = mapper.fieldType(fieldName); - if (type instanceof TextFieldMapper.TextFieldType t) { - if (t.isSyntheticSource() == false || t.canUseSyntheticSourceDelegateForQuerying() || t.isStored()) { - return false; - } - String parentField = mapper.mappingLookup().parentField(fieldName); - return parentField == null || nullLoaderExpected(mapper, parentField); - } - return false; - } - public static void validateRoundTripReader(String syntheticSource, DirectoryReader reader, DirectoryReader roundTripReader) { // `reader` here is reader of original document and `roundTripReader` reads document // created from synthetic source. @@ -98,6 +83,11 @@ private static class TextFieldFamilySyntheticSourceSupport implements MapperTest ); } + @Override + public boolean ignoreAbove() { + return keywordMultiFieldSyntheticSourceSupport.ignoreAbove(); + } + @Override public MapperTestCase.SyntheticSourceExample example(int maxValues) { if (store) { diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 8bc81fef2157d..7a72a7bd0daf0 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; import org.elasticsearch.plugins.Plugin; @@ -366,15 +365,9 @@ protected static Settings.Builder indexSettingsNoReplicas(int shards) { /** * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format. */ - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - // This used to pick an index version from 7.0.0 to 8.9.0. 
The minimum now is 8.0.0 but it's not clear what the upper range should be protected void maybeInitWithOldSnapshotVersion(String repoName, Path repoPath) throws Exception { if (randomBoolean() && randomBoolean()) { - initWithSnapshotVersion( - repoName, - repoPath, - IndexVersionUtils.randomVersionBetween(random(), IndexVersions.MINIMUM_COMPATIBLE, IndexVersions.V_8_9_0) - ); + initWithSnapshotVersion(repoName, repoPath, IndexVersionUtils.randomVersion()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java deleted file mode 100644 index 9e2f8c931c84a..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.TimeValue; - -import java.util.HashSet; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Simulates irregular long gc intervals. - */ -public class IntermittentLongGCDisruption extends LongGCDisruption { - - volatile boolean disrupting; - volatile Thread worker; - - final long intervalBetweenDelaysMin; - final long intervalBetweenDelaysMax; - final long delayDurationMin; - final long delayDurationMax; - - public IntermittentLongGCDisruption( - Random random, - String disruptedNode, - long intervalBetweenDelaysMin, - long intervalBetweenDelaysMax, - long delayDurationMin, - long delayDurationMax - ) { - super(random, disruptedNode); - this.intervalBetweenDelaysMin = intervalBetweenDelaysMin; - this.intervalBetweenDelaysMax = intervalBetweenDelaysMax; - this.delayDurationMin = delayDurationMin; - this.delayDurationMax = delayDurationMax; - } - - static final AtomicInteger thread_ids = new AtomicInteger(); - - @Override - public void startDisrupting() { - disrupting = true; - worker = new Thread(new BackgroundWorker(), "long_gc_simulation_" + thread_ids.incrementAndGet()); - worker.setDaemon(true); - worker.start(); - } - - @Override - public void stopDisrupting() { - if (worker == null) { - return; - } - logger.info("stopping long GCs on [{}]", disruptedNode); - disrupting = false; - worker.interrupt(); - try { - worker.join(2 * (intervalBetweenDelaysMax + delayDurationMax)); - } catch (InterruptedException e) { - logger.info("background thread failed to stop"); - } - worker = null; - } - - private void simulateLongGC(final TimeValue duration) throws InterruptedException { - logger.info("node [{}] goes into GC for for [{}]", disruptedNode, duration); - final Set nodeThreads = new HashSet<>(); - try { - while (suspendThreads(nodeThreads)) - ; - if (nodeThreads.isEmpty() == false) { - Thread.sleep(duration.millis()); - } - } finally { - logger.info("node [{}] resumes from GC", disruptedNode); - resumeThreads(nodeThreads); - } - } - - class BackgroundWorker implements Runnable { - - @Override - public void run() { - while (disrupting) { - 
try { - TimeValue duration = new TimeValue(delayDurationMin + random.nextInt((int) (delayDurationMax - delayDurationMin))); - simulateLongGC(duration); - - duration = new TimeValue( - intervalBetweenDelaysMin + random.nextInt((int) (intervalBetweenDelaysMax - intervalBetweenDelaysMin)) - ); - if (disrupting) { - Thread.sleep(duration.millis()); - } - } catch (InterruptedException e) {} catch (Exception e) { - logger.error("error in background worker", e); - } - } - } - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java deleted file mode 100644 index dce9e2600d0a6..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java +++ /dev/null @@ -1,350 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.test.disruption; - -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.core.TimeValue; -import org.elasticsearch.test.InternalTestCluster; - -import java.lang.management.ManagementFactory; -import java.lang.management.ThreadInfo; -import java.lang.management.ThreadMXBean; -import java.util.Arrays; -import java.util.Random; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -/** - * Suspends all threads on the specified node in order to simulate a long gc. - */ -public class LongGCDisruption extends SingleNodeDisruption { - - private static final Pattern[] unsafeClasses = new Pattern[] { - // logging has shared JVM locks; we may suspend a thread and block other nodes from doing their thing - Pattern.compile("logging\\.log4j"), - // security manager is shared across all nodes and it uses synchronized maps internally - Pattern.compile("java\\.lang\\.SecurityManager"), - // SecureRandom instance from SecureRandomHolder class is shared by all nodes - Pattern.compile("java\\.security\\.SecureRandom"), - // Lucene's WindowsFS is shared across nodes and contains some coarse synchronization - Pattern.compile("org\\.apache\\.lucene\\.tests\\.mockfile\\.WindowsFS") }; - - private static final ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); - - protected final String disruptedNode; - private Set suspendedThreads; - private Thread blockDetectionThread; - - private final AtomicBoolean sawSlowSuspendBug = new AtomicBoolean(false); - - public LongGCDisruption(Random random, String disruptedNode) { - super(random); - this.disruptedNode = disruptedNode; - } - - /** - * Checks if during disruption we ran into a known JVM issue that makes {@link Thread#suspend()} calls block for multiple seconds - * was observed. 
- * @see JDK-8218446 - * @return true if during thread suspending a call to {@link Thread#suspend()} took more than 3s - */ - public boolean sawSlowSuspendBug() { - return sawSlowSuspendBug.get(); - } - - @Override - public synchronized void startDisrupting() { - if (suspendedThreads == null) { - boolean success = false; - try { - suspendedThreads = ConcurrentHashMap.newKeySet(); - - final String currentThreadName = Thread.currentThread().getName(); - assert isDisruptedNodeThread(currentThreadName) == false - : "current thread match pattern. thread name: " + currentThreadName + ", node: " + disruptedNode; - // we spawn a background thread to protect against deadlock which can happen - // if there are shared resources between caller thread and suspended threads - // see unsafeClasses to how to avoid that - final AtomicReference suspendingError = new AtomicReference<>(); - final Thread suspendingThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - suspendingError.set(e); - } - - @Override - protected void doRun() throws Exception { - // keep trying to suspend threads, until no new threads are discovered. - while (suspendThreads(suspendedThreads)) { - if (Thread.interrupted()) { - return; - } - } - } - }); - suspendingThread.setName(currentThreadName + "[LongGCDisruption][threadSuspender]"); - suspendingThread.start(); - try { - suspendingThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - suspendingThread.interrupt(); // best effort to signal suspending - throw new RuntimeException(e); - } - if (suspendingError.get() != null) { - throw new RuntimeException("unknown error while suspending threads", suspendingError.get()); - } - if (suspendingThread.isAlive()) { - logger.warn( - """ - failed to suspend node [{}]'s threads within [{}] millis. Suspending thread stack trace: - {} - Threads that weren't suspended: - {}""", - disruptedNode, - getSuspendingTimeoutInMillis(), - stackTrace(suspendingThread.getStackTrace()), - suspendedThreads.stream() - .map(t -> t.getName() + "\n----\n" + stackTrace(t.getStackTrace())) - .collect(Collectors.joining("\n")) - ); - suspendingThread.interrupt(); // best effort; - try { - /* - * We need to join on the suspending thread in case it has suspended a thread that is in a critical section and - * needs to be resumed. 
- */ - suspendingThread.join(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - throw new RuntimeException("suspending node threads took too long"); - } - // block detection checks if other threads are blocked waiting on an object that is held by one - // of the threads that was suspended - if (isBlockDetectionSupported()) { - blockDetectionThread = new Thread(new AbstractRunnable() { - @Override - public void onFailure(Exception e) { - if (e instanceof InterruptedException == false) { - throw new AssertionError("unexpected exception in blockDetectionThread", e); - } - } - - @Override - protected void doRun() throws Exception { - while (Thread.currentThread().isInterrupted() == false) { - ThreadInfo[] threadInfos = threadBean.dumpAllThreads(true, true); - for (ThreadInfo threadInfo : threadInfos) { - if (isDisruptedNodeThread(threadInfo.getThreadName()) == false - && threadInfo.getLockOwnerName() != null - && isDisruptedNodeThread(threadInfo.getLockOwnerName())) { - - // find ThreadInfo object of the blocking thread (if available) - ThreadInfo blockingThreadInfo = null; - for (ThreadInfo otherThreadInfo : threadInfos) { - if (otherThreadInfo.getThreadId() == threadInfo.getLockOwnerId()) { - blockingThreadInfo = otherThreadInfo; - break; - } - } - onBlockDetected(threadInfo, blockingThreadInfo); - } - } - Thread.sleep(getBlockDetectionIntervalInMillis()); - } - } - }); - blockDetectionThread.setName(currentThreadName + "[LongGCDisruption][blockDetection]"); - blockDetectionThread.start(); - } - success = true; - } finally { - if (success == false) { - stopBlockDetection(); - // resume threads if failed - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - } else { - throw new IllegalStateException("can't disrupt twice, call stopDisrupting() first"); - } - } - - public boolean isDisruptedNodeThread(String threadName) { - return threadName.contains("[" + disruptedNode + "]"); - } - - private static String stackTrace(StackTraceElement[] stackTraceElements) { - return Arrays.stream(stackTraceElements).map(Object::toString).collect(Collectors.joining("\n")); - } - - @Override - public synchronized void stopDisrupting() { - stopBlockDetection(); - if (suspendedThreads != null) { - resumeThreads(suspendedThreads); - suspendedThreads = null; - } - } - - private void stopBlockDetection() { - if (blockDetectionThread != null) { - try { - blockDetectionThread.interrupt(); // best effort - blockDetectionThread.join(getSuspendingTimeoutInMillis()); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - blockDetectionThread = null; - } - } - - @Override - public void removeAndEnsureHealthy(InternalTestCluster cluster) { - removeFromCluster(cluster); - ensureNodeCount(cluster); - } - - @Override - public TimeValue expectedTimeToHeal() { - return TimeValue.timeValueMillis(0); - } - - /** - * resolves all threads belonging to given node and suspends them if their current stack trace - * is "safe". Threads are added to nodeThreads if suspended. - * - * returns true if some live threads were found. The caller is expected to call this method - * until no more "live" are found. 
- */ - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected boolean suspendThreads(Set nodeThreads) { - Thread[] allThreads = null; - while (allThreads == null) { - allThreads = new Thread[Thread.activeCount()]; - if (Thread.enumerate(allThreads) > allThreads.length) { - // we didn't make enough space, retry - allThreads = null; - } - } - boolean liveThreadsFound = false; - for (Thread thread : allThreads) { - if (thread == null) { - continue; - } - String threadName = thread.getName(); - if (isDisruptedNodeThread(threadName)) { - if (thread.isAlive() && nodeThreads.add(thread)) { - liveThreadsFound = true; - logger.trace("suspending thread [{}]", threadName); - // we assume it is not safe to suspend the thread - boolean safe = false; - try { - /* - * At the bottom of this try-block we will know whether or not it is safe to suspend the thread; we start by - * assuming that it is safe. - */ - boolean definitelySafe = true; - final long startTime = System.nanoTime(); - thread.suspend(); - if (System.nanoTime() - startTime > TimeUnit.SECONDS.toNanos(3L)) { - sawSlowSuspendBug.set(true); - } - // double check the thread is not in a shared resource like logging; if so, let it go and come back - safe: for (StackTraceElement stackElement : thread.getStackTrace()) { - String className = stackElement.getClassName(); - for (Pattern unsafePattern : getUnsafeClasses()) { - if (unsafePattern.matcher(className).find()) { - // it is definitely not safe to suspend the thread - definitelySafe = false; - break safe; - } - } - } - safe = definitelySafe; - } finally { - if (safe == false) { - /* - * Do not log before resuming as we might be interrupted while logging in which case we will throw an - * interrupted exception and never resume the suspended thread that is in a critical section. Also, logging - * before resuming makes for confusing log messages if we never hit the resume. - */ - thread.resume(); - logger.trace("resumed thread [{}] as it is in a critical section", threadName); - nodeThreads.remove(thread); - } - } - } - } - } - return liveThreadsFound; - } - - // for testing - protected Pattern[] getUnsafeClasses() { - return unsafeClasses; - } - - // for testing - protected long getSuspendingTimeoutInMillis() { - return TimeValue.timeValueSeconds(30).getMillis(); - } - - public boolean isBlockDetectionSupported() { - return threadBean.isObjectMonitorUsageSupported() && threadBean.isSynchronizerUsageSupported(); - } - - // for testing - protected long getBlockDetectionIntervalInMillis() { - return 3000L; - } - - // for testing - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - String blockedThreadStackTrace = stackTrace(blockedThread.getStackTrace()); - String blockingThreadStackTrace = blockingThread != null ? 
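// The removed suspendThreads(...) above leans on a classic enumeration idiom: Thread.activeCount()
// is only an estimate, so the snapshot array is retried with more head-room until everything fits.
// A minimal standalone sketch of that idiom, assuming nothing beyond java.lang (ThreadSnapshot and
// snapshotThreads are illustrative names, not Elasticsearch API):
import java.util.ArrayList;
import java.util.List;

final class ThreadSnapshot {
    static List<Thread> snapshotLiveThreads() {
        int capacity = Thread.activeCount() + 8; // head-room: activeCount is only an estimate
        while (true) {
            Thread[] all = new Thread[capacity];
            int n = Thread.enumerate(all);
            if (n < all.length) {
                List<Thread> live = new ArrayList<>(n);
                for (int i = 0; i < n; i++) {
                    if (all[i].isAlive()) {
                        live.add(all[i]);
                    }
                }
                return live;
            }
            capacity *= 2; // array filled up, the snapshot may be truncated; retry bigger
        }
    }
}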
stackTrace(blockingThread.getStackTrace()) : "not available"; - throw new AssertionError( - "Thread [" - + blockedThread.getThreadName() - + "] is blocked waiting on the resource [" - + blockedThread.getLockInfo() - + "] held by the suspended thread [" - + blockedThread.getLockOwnerName() - + "] of the disrupted node [" - + disruptedNode - + "].\n" - + "Please add this occurrence to the unsafeClasses list in [" - + LongGCDisruption.class.getName() - + "].\n" - + "Stack trace of blocked thread: " - + blockedThreadStackTrace - + "\n" - + "Stack trace of blocking thread: " - + blockingThreadStackTrace - ); - } - - @SuppressWarnings("deprecation") // suspends/resumes threads intentionally - @SuppressForbidden(reason = "suspends/resumes threads intentionally") - protected void resumeThreads(Set threads) { - for (Thread thread : threads) { - thread.resume(); - } - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index fd376fcd07688..18c591166e720 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -39,7 +39,6 @@ import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; @@ -50,7 +49,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; @@ -586,13 +584,8 @@ public void sendRequest( // poor mans request cloning... BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest; - if (request instanceof BytesTransportRequest) { - clonedRequest = copyRawBytesForBwC(bStream); - } else { - RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); - clonedRequest = reg.newRequest(bStream.bytes().streamInput()); - } + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); + final TransportRequest clonedRequest = reg.newRequest(bStream.bytes().streamInput()); assert clonedRequest.getClass().equals(MasterNodeRequestHelper.unwrapTermOverride(request).getClass()) : clonedRequest + " vs " + request; @@ -640,15 +633,6 @@ protected void doRun() throws IOException { } } - // Some request handlers read back a BytesTransportRequest - // into a different class that cannot be re-serialized (i.e. JOIN_VALIDATE_ACTION_NAME), - // in those cases we just copy the raw bytes back to a BytesTransportRequest. 
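// The MockTransportService change above drops the BytesTransportRequest special case, so every
// tracked request is now cloned the same way: serialize it to a BytesStreamOutput and re-read it
// through the handler's Writeable.Reader. A hedged sketch of that round-trip pattern (RequestCloner
// is an illustrative helper, not part of the test framework):
import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

final class RequestCloner {
    // Cloning by a serialize/deserialize round trip exercises the same wire code paths a real
    // network hop would, which is exactly why the mock transport clones requests this way.
    static <T extends Writeable> T clone(T request, Writeable.Reader<T> reader) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            request.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                return reader.read(in);
            }
        }
    }
}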
- // This is only needed for the BwC for JOIN_VALIDATE_ACTION_NAME and can be removed in the next major - @UpdateForV9(owner = UpdateForV9.Owner.DISTRIBUTED_COORDINATION) - private static TransportRequest copyRawBytesForBwC(BytesStreamOutput bStream) throws IOException { - return new BytesTransportRequest(bStream.bytes().streamInput()); - } - @Override public void clearCallback() { synchronized (this) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java b/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java deleted file mode 100644 index 72ecba8d502f1..0000000000000 --- a/test/framework/src/test/java/org/elasticsearch/test/disruption/LongGCDisruptionTests.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ -package org.elasticsearch.test.disruption; - -import org.elasticsearch.core.Nullable; -import org.elasticsearch.test.ESTestCase; -import org.junit.BeforeClass; - -import java.lang.management.ThreadInfo; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantLock; -import java.util.regex.Pattern; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; - -public class LongGCDisruptionTests extends ESTestCase { - - static class LockedExecutor { - ReentrantLock lock = new ReentrantLock(); - - public void executeLocked(Runnable r) { - lock.lock(); - try { - r.run(); - } finally { - lock.unlock(); - } - } - } - - @BeforeClass - public static void ignoreJdk20Plus() { - assumeFalse("jdk20 removed thread suspend/resume", Runtime.version().feature() >= 20); - } - - public void testBlockingTimeout() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - - @Override - protected long getSuspendingTimeoutInMillis() { - return 100; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[10]; - try { - for (int i = 0; i < 10; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 9 && randomBoolean()) || i == 0; - threads[i] = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - 
ops.incrementAndGet(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - underLock.await(); - RuntimeException e = expectThrows(RuntimeException.class, disruption::startDisrupting); - assertThat(e.getMessage(), containsString("suspending node threads took too long")); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - /** - * Checks that a GC disruption never blocks threads while they are doing something "unsafe" - * but does keep retrying until all threads can be safely paused - */ - public void testNotBlockingUnsafeStackTraces() throws Exception { - final String nodeName = "test_node"; - LongGCDisruption disruption = new LongGCDisruption(random(), nodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[] { Pattern.compile(LockedExecutor.class.getSimpleName()) }; - } - }; - final AtomicBoolean stop = new AtomicBoolean(); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final Thread[] threads = new Thread[5]; - final Runnable yieldAndIncrement = () -> { - Thread.yield(); // give some chance to catch this stack trace - ops.incrementAndGet(); - }; - try { - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - for (int iter = 0; stop.get() == false; iter++) { - if (iter % 2 == 0) { - lockedExecutor.executeLocked(yieldAndIncrement); - } else { - yieldAndIncrement.run(); - } - } - }); - threads[i].setName("[" + nodeName + "][" + i + "]"); - threads[i].start(); - } - // make sure some threads are under lock - try { - disruption.startDisrupting(); - } catch (RuntimeException e) { - if (e.getMessage().contains("suspending node threads took too long") && disruption.sawSlowSuspendBug()) { - return; - } - throw new AssertionError(e); - } - long first = ops.get(); - assertThat(lockedExecutor.lock.isLocked(), equalTo(false)); // no threads should own the lock - Thread.sleep(100); - assertThat(ops.get(), equalTo(first)); - disruption.stopDisrupting(); - assertBusy(() -> assertThat(ops.get(), greaterThan(first))); - } finally { - disruption.stopDisrupting(); - stop.set(true); - for (final Thread thread : threads) { - thread.join(); - } - } - } - - public void testBlockDetection() throws Exception { - final String disruptedNodeName = "disrupted_node"; - final String blockedNodeName = "blocked_node"; - CountDownLatch waitForBlockDetectionResult = new CountDownLatch(1); - AtomicReference blockDetectionResult = new AtomicReference<>(); - LongGCDisruption disruption = new LongGCDisruption(random(), disruptedNodeName) { - @Override - protected Pattern[] getUnsafeClasses() { - return new Pattern[0]; - } - - @Override - protected void onBlockDetected(ThreadInfo blockedThread, @Nullable ThreadInfo blockingThread) { - blockDetectionResult.set(blockedThread); - waitForBlockDetectionResult.countDown(); - } - - @Override - protected long getBlockDetectionIntervalInMillis() { - return 10L; - } - }; - if (disruption.isBlockDetectionSupported() == false) { - return; - } - final AtomicBoolean stop = new AtomicBoolean(); - final CountDownLatch underLock = new CountDownLatch(1); - final CountDownLatch pauseUnderLock = new CountDownLatch(1); - final LockedExecutor lockedExecutor = new LockedExecutor(); - final AtomicLong ops = new AtomicLong(); - final List threads = new ArrayList<>(); - try { - for (int i = 0; i < 5; i++) { - // at least 
one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { - try { - underLock.countDown(); - ops.incrementAndGet(); - pauseUnderLock.await(); - } catch (InterruptedException e) { - - } - }); - } else { - ops.incrementAndGet(); - } - } - }); - - thread.setName("[" + disruptedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - - for (int i = 0; i < 5; i++) { - // at least one locked and one none lock thread - final boolean lockedExec = (i < 4 && randomBoolean()) || i == 0; - Thread thread = new Thread(() -> { - while (stop.get() == false) { - if (lockedExec) { - lockedExecutor.executeLocked(() -> { ops.incrementAndGet(); }); - } else { - ops.incrementAndGet(); - } - } - }); - thread.setName("[" + blockedNodeName + "][" + i + "]"); - threads.add(thread); - thread.start(); - } - // make sure some threads of test_node are under lock - underLock.await(); - disruption.startDisrupting(); - assertTrue(waitForBlockDetectionResult.await(30, TimeUnit.SECONDS)); - disruption.stopDisrupting(); - - ThreadInfo threadInfo = blockDetectionResult.get(); - assertNotNull(threadInfo); - assertThat(threadInfo.getThreadName(), containsString("[" + blockedNodeName + "]")); - assertThat(threadInfo.getLockOwnerName(), containsString("[" + disruptedNodeName + "]")); - assertThat(threadInfo.getLockInfo().getClassName(), containsString(ReentrantLock.class.getName())); - } finally { - stop.set(true); - pauseUnderLock.countDown(); - for (final Thread thread : threads) { - thread.join(); - } - } - } -} diff --git a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java index f517c03468bc2..753ce8283afca 100644 --- a/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java +++ b/x-pack/plugin/analytics/src/test/java/org/elasticsearch/xpack/analytics/rate/TimeSeriesRateAggregatorTests.java @@ -156,8 +156,12 @@ public void testNestedWithinAutoDateHistogram() throws IOException { AggTestConfig aggTestConfig = new AggTestConfig(tsBuilder, timeStampField(), counterField("counter_field")) .withSplitLeavesIntoSeperateAggregators(false); expectThrows(IllegalArgumentException.class, () -> testCase(iw -> { - iw.addDocuments(docs(2000, "1", 15, 37, 60, /*reset*/ 14)); - iw.addDocuments(docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)); + for (Document document : docs(2000, "1", 15, 37, 60, /*reset*/ 14)) { + iw.addDocument(document); + } + for (Document document : docs(2000, "2", 74, 150, /*reset*/ 50, 90, /*reset*/ 40)) { + iw.addDocument(document); + } }, verifier, aggTestConfig)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java index 3674103eda215..f8cb371687d72 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import 
java.util.Map; @@ -92,10 +93,13 @@ public OperationMode getOperationMode() { } public Map<String, LifecyclePolicy> getPolicies() { - return policyMetadatas.values() - .stream() - .map(LifecyclePolicyMetadata::getPolicy) - .collect(Collectors.toMap(LifecyclePolicy::getName, Function.identity())); + // note: this loop is unrolled rather than streaming-style because it's hot enough to show up in a flamegraph + Map<String, LifecyclePolicy> policies = new HashMap<>(policyMetadatas.size()); + for (LifecyclePolicyMetadata policyMetadata : policyMetadatas.values()) { + LifecyclePolicy policy = policyMetadata.getPolicy(); + policies.put(policy.getName(), policy); + } + return Collections.unmodifiableMap(policies); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java index 6e06133509644..ea0462d0f103e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/GetInferenceModelAction.java @@ -62,7 +62,8 @@ public Request(StreamInput in) throws IOException { super(in); this.inferenceEntityId = in.readString(); this.taskType = TaskType.fromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || in.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { this.persistDefaultConfig = in.readBoolean(); } else { this.persistDefaultConfig = PERSIST_DEFAULT_CONFIGS; @@ -87,7 +88,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(inferenceEntityId); taskType.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ) + || out.getTransportVersion().isPatchFrom(TransportVersions.INFERENCE_DONT_PERSIST_ON_READ_BACKPORT_8_16)) { out.writeBoolean(this.persistDefaultConfig); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index ca789fee7b744..b298d486c9e03 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -71,7 +71,7 @@ public class StartTrainedModelDeploymentAction extends ActionType implements ToXCon /** * If the queue is created then we can OOM when we create the queue.
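// The GetInferenceModelAction change above is the usual pattern for a wire change that was also
// backported to a patch release: the field is exchanged when the peer is on or after the main
// transport version OR on the backported patch line. A minimal sketch under that assumption; the
// version constants are passed in rather than invented, since only the real TransportVersions
// fields shown in the diff exist:
import java.io.IOException;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.StreamOutput;

final class VersionGatedWrite {
    static void writeOptionalFlag(StreamOutput out, boolean flag, TransportVersion main, TransportVersion backport)
        throws IOException {
        if (out.getTransportVersion().onOrAfter(main) || out.getTransportVersion().isPatchFrom(backport)) {
            out.writeBoolean(flag); // the peer knows how to read the field
        }
        // older peers: omit the field; the reading side must fall back to a default
    }
}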
*/ - private static final int MAX_QUEUE_CAPACITY = 1_000_000; + private static final int MAX_QUEUE_CAPACITY = 100_000; public static final ParseField MODEL_ID = new ParseField("model_id"); public static final ParseField DEPLOYMENT_ID = new ParseField("deployment_id"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java index e61342d281c90..83f7832645270 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java @@ -10,9 +10,7 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.ingest.ConfigurationUtils; import org.elasticsearch.ingest.IngestMetadata; -import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.transport.Transports; import java.util.HashMap; @@ -24,6 +22,7 @@ import java.util.function.Consumer; import static org.elasticsearch.inference.InferenceResults.MODEL_ID_RESULTS_FIELD; +import static org.elasticsearch.ingest.Pipeline.ON_FAILURE_KEY; import static org.elasticsearch.ingest.Pipeline.PROCESSORS_KEY; /** @@ -53,16 +52,10 @@ public static int countInferenceProcessors(ClusterState state) { Counter counter = Counter.newCounter(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map<String, Object> configMap = configuration.getConfigAsMap(); - List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List<Map<String, Object>> processorConfigs = (List<Map<String, Object>>) configMap.get(PROCESSORS_KEY); for (Map<String, Object> processorConfigWithKey : processorConfigs) { for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) { - addModelsAndPipelines( - entry.getKey(), - pipelineId, - (Map<String, Object>) entry.getValue(), - pam -> counter.addAndGet(1), - 0 - ); + addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> counter.addAndGet(1), 0); } } }); @@ -73,7 +66,6 @@ public static int countInferenceProcessors(ClusterState state) { * @param ingestMetadata The ingestMetadata of current ClusterState * @return The set of model IDs referenced by inference processors */ - @SuppressWarnings("unchecked") public static Set<String> getModelIdsFromInferenceProcessors(IngestMetadata ingestMetadata) { if (ingestMetadata == null) { return Set.of(); @@ -82,7 +74,7 @@ public static Set<String> getModelIdsFromInferenceProcessors(IngestMetadata inge Set<String> modelIds = new LinkedHashSet<>(); ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map<String, Object> configMap = configuration.getConfigAsMap(); - List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List<Map<String, Object>> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map<String, Object> processorConfigWithKey : processorConfigs) { for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> modelIds.add(pam.modelIdOrAlias()), 0); @@ -96,7 +88,6 @@ public static Set<String> getModelIdsFromInferenceProcessors(IngestMetadata inge * @param state Current cluster state * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them.
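// With MAX_QUEUE_CAPACITY lowered to 100_000 above, a request-side bounds check keeps oversized
// queues from ever being allocated (per the javadoc, the queue can OOM at creation time). A
// hedged sketch of such a check; the class and method names are illustrative only:
final class QueueCapacity {
    static final int MAX_QUEUE_CAPACITY = 100_000;

    static void validateQueueCapacity(int queueCapacity) {
        if (queueCapacity <= 0) {
            throw new IllegalArgumentException("[queue_capacity] must be positive");
        }
        if (queueCapacity > MAX_QUEUE_CAPACITY) {
            // matches the message asserted in StartTrainedModelDeploymentRequestTests below
            throw new IllegalArgumentException("[queue_capacity] must be less than " + MAX_QUEUE_CAPACITY);
        }
    }
}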
*/ - @SuppressWarnings("unchecked") public static Map<String, Set<String>> pipelineIdsByResource(ClusterState state, Set<String> ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Map<String, Set<String>> pipelineIdsByModelIds = new HashMap<>(); @@ -110,7 +101,7 @@ public static Map<String, Set<String>> pipelineIdsByResource(ClusterState state, } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map<String, Object> configMap = configuration.getConfigAsMap(); - List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List<Map<String, Object>> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map<String, Object> processorConfigWithKey : processorConfigs) { for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -128,7 +119,6 @@ public static Map<String, Set<String>> pipelineIdsByResource(ClusterState state, * @param state Current {@link ClusterState} * @return a map from Model or Deployment IDs or Aliases to each pipeline referencing them. */ - @SuppressWarnings("unchecked") public static Set<String> pipelineIdsForResource(ClusterState state, Set<String> ids) { assert Transports.assertNotTransportThread("non-trivial nested loops over cluster state structures"); Set<String> pipelineIds = new HashSet<>(); @@ -142,7 +132,7 @@ public static Set<String> pipelineIdsForResource(ClusterState state, Set<String> } ingestMetadata.getPipelines().forEach((pipelineId, configuration) -> { Map<String, Object> configMap = configuration.getConfigAsMap(); - List<Map<String, Object>> processorConfigs = ConfigurationUtils.readList(null, null, configMap, PROCESSORS_KEY); + List<Map<String, Object>> processorConfigs = readList(configMap, PROCESSORS_KEY); for (Map<String, Object> processorConfigWithKey : processorConfigs) { for (Map.Entry<String, Object> entry : processorConfigWithKey.entrySet()) { addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), pam -> { @@ -188,7 +178,7 @@ private static void addModelsAndPipelines( addModelsAndPipelines( innerProcessorWithName.getKey(), pipelineId, - (Map<String, Object>) innerProcessorWithName.getValue(), + innerProcessorWithName.getValue(), handler, level + 1 ); @@ -196,13 +186,8 @@ private static void addModelsAndPipelines( } return; } - if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(Pipeline.ON_FAILURE_KEY)) { - List<Map<String, Object>> onFailureConfigs = ConfigurationUtils.readList( - null, - null, - (Map<String, Object>) definitionMap, - Pipeline.ON_FAILURE_KEY - ); + if (processorDefinition instanceof Map definitionMap && definitionMap.containsKey(ON_FAILURE_KEY)) { + List<Map<String, Object>> onFailureConfigs = readList(definitionMap, ON_FAILURE_KEY); onFailureConfigs.stream() .flatMap(map -> map.entrySet().stream()) .forEach(entry -> addModelsAndPipelines(entry.getKey(), pipelineId, entry.getValue(), handler, level + 1)); @@ -211,4 +196,16 @@ private static void addModelsAndPipelines( private record PipelineAndModel(String pipelineId, String modelIdOrAlias) {} + /** + * A local alternative to ConfigurationUtils.readList(...) that reads list properties out of the processor configuration map, + * but doesn't rely on mutating the configuration map.
+ */ + @SuppressWarnings("unchecked") + private static List> readList(Map processorConfig, String key) { + Object val = processorConfig.get(key); + if (val == null) { + throw new IllegalArgumentException("Missing required property [" + key + "]"); + } + return (List>) val; + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 17088cff8718b..c504ebe56ed45 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -54,6 +54,18 @@ */ public class ExpressionRoleMapping implements ToXContentObject, Writeable { + /** + * Reserved suffix for read-only operator-defined role mappings. + * This suffix is added to the name of all cluster-state role mappings returned via + * the {@code TransportGetRoleMappingsAction} action. + */ + public static final String READ_ONLY_ROLE_MAPPING_SUFFIX = "-read-only-operator-mapping"; + /** + * Reserved metadata field to mark role mappings as read-only. + * This field is added to the metadata of all cluster-state role mappings returned via + * the {@code TransportGetRoleMappingsAction} action. + */ + public static final String READ_ONLY_ROLE_MAPPING_METADATA_FLAG = "_read_only"; private static final ObjectParser PARSER = new ObjectParser<>("role-mapping", Builder::new); /** @@ -136,6 +148,28 @@ public ExpressionRoleMapping(StreamInput in) throws IOException { this.metadata = in.readGenericMap(); } + public static boolean hasReadOnlySuffix(String name) { + return name.endsWith(READ_ONLY_ROLE_MAPPING_SUFFIX); + } + + public static void validateNoReadOnlySuffix(String name) { + if (hasReadOnlySuffix(name)) { + throw new IllegalArgumentException( + "Invalid mapping name [" + name + "]. [" + READ_ONLY_ROLE_MAPPING_SUFFIX + "] is not an allowed suffix" + ); + } + } + + public static String addReadOnlySuffix(String name) { + return name + READ_ONLY_ROLE_MAPPING_SUFFIX; + } + + public static String removeReadOnlySuffixIfPresent(String name) { + return name.endsWith(READ_ONLY_ROLE_MAPPING_SUFFIX) + ? 
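// The readList(...) helper added to InferenceProcessorInfoExtractor above deliberately does a
// read-only lookup where ConfigurationUtils.readList(...) would consume (remove) the entry from
// the map it reads. These call sites iterate configuration maps derived from cluster state, so
// the non-mutating read matters. A self-contained usage sketch (the pipeline literal is
// illustrative, not a real pipeline definition):
import java.util.List;
import java.util.Map;

final class ReadListDemo {
    @SuppressWarnings("unchecked")
    static List<Map<String, Object>> readList(Map<String, Object> config, String key) {
        Object val = config.get(key); // read-only: the entry stays in the map
        if (val == null) {
            throw new IllegalArgumentException("Missing required property [" + key + "]");
        }
        return (List<Map<String, Object>>) val;
    }

    public static void main(String[] args) {
        Map<String, Object> pipeline = Map.of("processors", List.of(Map.of("set", Map.of("field", "foo"))));
        System.out.println(readList(pipeline, "processors"));
        System.out.println(pipeline.containsKey("processors")); // still true: the map was not consumed
    }
}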
name.substring(0, name.length() - READ_ONLY_ROLE_MAPPING_SUFFIX.length()) + : name; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java index b38b33e082382..74c6223b1ebdd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.core.security.authz; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; @@ -26,8 +28,10 @@ import java.io.IOException; import java.util.Collection; import java.util.EnumSet; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashSet; +import java.util.Map; import java.util.Objects; import java.util.Set; @@ -36,7 +40,11 @@ public final class RoleMappingMetadata extends AbstractNamedDiffable implements Metadata.Custom { + private static final Logger logger = LogManager.getLogger(RoleMappingMetadata.class); + public static final String TYPE = "role_mappings"; + public static final String METADATA_NAME_FIELD = "_es_reserved_role_mapping_name"; + public static final String FALLBACK_NAME = "name_not_available_after_deserialization"; @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -46,12 +54,7 @@ public final class RoleMappingMetadata extends AbstractNamedDiffable ExpressionRoleMapping.parse("name_not_available_after_deserialization", p), - new ParseField(TYPE) - ); + PARSER.declareObjectArray(constructorArg(), (p, c) -> parseWithNameFromMetadata(p), new ParseField(TYPE)); } private static final RoleMappingMetadata EMPTY = new RoleMappingMetadata(Set.of()); @@ -153,4 +156,64 @@ public EnumSet context() { // are not persisted. return ALL_CONTEXTS; } + + /** + * Ensures role mapping names are preserved when stored on disk using XContent format, + * which omits names. This method copies the role mapping's name into a reserved metadata field + * during serialization, allowing recovery during deserialization (e.g., after a master-node restart). + * {@link #parseWithNameFromMetadata(XContentParser)} restores the name during parsing. + */ + public static ExpressionRoleMapping copyWithNameInMetadata(ExpressionRoleMapping roleMapping) { + Map metadata = new HashMap<>(roleMapping.getMetadata()); + // note: can't use Maps.copyWith... since these create maps that don't support `null` values in map entries + if (metadata.put(METADATA_NAME_FIELD, roleMapping.getName()) != null) { + logger.error( + "Metadata field [{}] is reserved and will be overwritten with an internal system value. " + + "Rename this field in your role mapping configuration.", + METADATA_NAME_FIELD + ); + } + return new ExpressionRoleMapping( + roleMapping.getName(), + roleMapping.getExpression(), + roleMapping.getRoles(), + roleMapping.getRoleTemplates(), + metadata, + roleMapping.isEnabled() + ); + } + + /** + * If a role mapping does not yet have a name persisted in metadata, it will use a constant fallback name. 
This method checks if a + * role mapping has the fallback name. + */ + public static boolean hasFallbackName(ExpressionRoleMapping expressionRoleMapping) { + return expressionRoleMapping.getName().equals(FALLBACK_NAME); + } + + /** + * Parse a role mapping from XContent, restoring the name from a reserved metadata field. + * Used to parse a role mapping annotated with its name in metadata via @see {@link #copyWithNameInMetadata(ExpressionRoleMapping)}. + */ + public static ExpressionRoleMapping parseWithNameFromMetadata(XContentParser parser) throws IOException { + ExpressionRoleMapping roleMapping = ExpressionRoleMapping.parse(FALLBACK_NAME, parser); + return new ExpressionRoleMapping( + getNameFromMetadata(roleMapping), + roleMapping.getExpression(), + roleMapping.getRoles(), + roleMapping.getRoleTemplates(), + roleMapping.getMetadata(), + roleMapping.isEnabled() + ); + } + + private static String getNameFromMetadata(ExpressionRoleMapping roleMapping) { + Map metadata = roleMapping.getMetadata(); + if (metadata.containsKey(METADATA_NAME_FIELD) && metadata.get(METADATA_NAME_FIELD) instanceof String name) { + return name; + } else { + // This is valid the first time we recover from cluster-state: the old format metadata won't have a name stored in metadata yet + return FALLBACK_NAME; + } + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java index 5ba5c1fd1218a..23c93226d5494 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java @@ -53,7 +53,7 @@ public final class ApplicationPermission { return new PermissionEntry( appPriv, Sets.union(existing.resourceNames, resourceNames), - Automatons.unionAndMinimize(Arrays.asList(existing.resourceAutomaton, patterns)) + Automatons.unionAndDeterminize(Arrays.asList(existing.resourceAutomaton, patterns)) ); } })); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java index 4e608281a7858..5f3da8f73a708 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ClusterPermission.java @@ -137,7 +137,7 @@ public ClusterPermission build() { } List checks = this.permissionChecks; if (false == actionAutomatons.isEmpty()) { - final Automaton mergedAutomaton = Automatons.unionAndMinimize(this.actionAutomatons); + final Automaton mergedAutomaton = Automatons.unionAndDeterminize(this.actionAutomatons); checks = new ArrayList<>(this.permissionChecks.size() + 1); checks.add(new AutomatonPermissionCheck(mergedAutomaton)); checks.addAll(this.permissionChecks); @@ -156,7 +156,7 @@ private static Automaton createAutomaton(Set allowedActionPatterns, Set< } else { final Automaton allowedAutomaton = Automatons.patterns(allowedActionPatterns); final Automaton excludedAutomaton = Automatons.patterns(excludeActionPatterns); - return Automatons.minusAndMinimize(allowedAutomaton, excludedAutomaton); + return Automatons.minusAndDeterminize(allowedAutomaton, 
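// copyWithNameInMetadata(...) and parseWithNameFromMetadata(...) above form a round trip: the
// role mapping's name is stashed in a reserved metadata key before the XContent write (which
// drops names) and recovered after the read, falling back to the constant for pre-upgrade data.
// A minimal sketch of the same idea under that assumption, with plain maps standing in for role
// mappings (NameRoundTrip is illustrative, not Elasticsearch API):
import java.util.HashMap;
import java.util.Map;

final class NameRoundTrip {
    static final String METADATA_NAME_FIELD = "_es_reserved_role_mapping_name";
    static final String FALLBACK_NAME = "name_not_available_after_deserialization";

    // before serialization: stash the name in a metadata copy
    static Map<String, Object> stashName(String name, Map<String, Object> metadata) {
        Map<String, Object> copy = new HashMap<>(metadata);
        copy.put(METADATA_NAME_FIELD, name); // may overwrite a user value, which the real code logs about
        return copy;
    }

    // after deserialization: recover the name, or fall back for old-format data
    static String recoverName(Map<String, Object> metadata) {
        return metadata.get(METADATA_NAME_FIELD) instanceof String name ? name : FALLBACK_NAME;
    }
}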
excludedAutomaton); } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java index 235d7419d2bf0..ed7bbf9158278 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissions.java @@ -147,7 +147,7 @@ public static Automaton initializePermittedFieldsAutomaton(FieldPermissionsDefin List automatonList = groups.stream() .map(g -> FieldPermissions.buildPermittedFieldsAutomaton(g.getGrantedFields(), g.getExcludedFields())) .collect(Collectors.toList()); - return Automatons.unionAndMinimize(automatonList); + return Automatons.unionAndDeterminize(automatonList); } /** @@ -189,7 +189,7 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel ); } - grantedFieldsAutomaton = Automatons.minusAndMinimize(grantedFieldsAutomaton, deniedFieldsAutomaton); + grantedFieldsAutomaton = Automatons.minusAndDeterminize(grantedFieldsAutomaton, deniedFieldsAutomaton); return grantedFieldsAutomaton; } @@ -206,7 +206,10 @@ public static Automaton buildPermittedFieldsAutomaton(final String[] grantedFiel public FieldPermissions limitFieldPermissions(FieldPermissions limitedBy) { if (hasFieldLevelSecurity() && limitedBy != null && limitedBy.hasFieldLevelSecurity()) { // TODO: cache the automaton computation with FieldPermissionsCache - Automaton _permittedFieldsAutomaton = Automatons.intersectAndMinimize(getIncludeAutomaton(), limitedBy.getIncludeAutomaton()); + Automaton _permittedFieldsAutomaton = Automatons.intersectAndDeterminize( + getIncludeAutomaton(), + limitedBy.getIncludeAutomaton() + ); return new FieldPermissions( CollectionUtils.concatLists(fieldPermissionsDefinitions, limitedBy.fieldPermissionsDefinitions), _permittedFieldsAutomaton diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java index 46261937a0228..a1e14bfde8aa5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/FieldPermissionsCache.java @@ -107,7 +107,7 @@ FieldPermissions union(Collection fieldPermissionsCollection) List automatonList = fieldPermissionsCollection.stream() .map(FieldPermissions::getIncludeAutomaton) .collect(Collectors.toList()); - return new FieldPermissions(key, Automatons.unionAndMinimize(automatonList)); + return new FieldPermissions(key, Automatons.unionAndDeterminize(automatonList)); }); } catch (ExecutionException e) { throw new ElasticsearchException("unable to compute field permissions", e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java index 558f8e6f22ac1..cdd5a6f6ff72d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/IndicesPermission.java @@ -283,14 +283,14 @@ public boolean checkResourcePrivileges( for (String forIndexPattern : checkForIndexPatterns) { Automaton checkIndexAutomaton = Automatons.patterns(forIndexPattern); if (false == allowRestrictedIndices && false == isConcreteRestrictedIndex(forIndexPattern)) { - checkIndexAutomaton = Automatons.minusAndMinimize(checkIndexAutomaton, restrictedIndices.getAutomaton()); + checkIndexAutomaton = Automatons.minusAndDeterminize(checkIndexAutomaton, restrictedIndices.getAutomaton()); } if (false == Operations.isEmpty(checkIndexAutomaton)) { Automaton allowedIndexPrivilegesAutomaton = null; for (var indexAndPrivilegeAutomaton : indexGroupAutomatons.entrySet()) { if (Automatons.subsetOf(checkIndexAutomaton, indexAndPrivilegeAutomaton.getValue())) { if (allowedIndexPrivilegesAutomaton != null) { - allowedIndexPrivilegesAutomaton = Automatons.unionAndMinimize( + allowedIndexPrivilegesAutomaton = Automatons.unionAndDeterminize( Arrays.asList(allowedIndexPrivilegesAutomaton, indexAndPrivilegeAutomaton.getKey()) ); } else { @@ -342,7 +342,7 @@ public Automaton allowedActionsMatcher(String index) { automatonList.add(group.privilege.getAutomaton()); } } - return automatonList.isEmpty() ? Automatons.EMPTY : Automatons.unionAndMinimize(automatonList); + return automatonList.isEmpty() ? Automatons.EMPTY : Automatons.unionAndDeterminize(automatonList); } /** @@ -704,7 +704,7 @@ private Map indexGroupAutomatons(boolean combine) { Automaton indexAutomaton = group.getIndexMatcherAutomaton(); allAutomatons.compute( group.privilege().getAutomaton(), - (key, value) -> value == null ? indexAutomaton : Automatons.unionAndMinimize(List.of(value, indexAutomaton)) + (key, value) -> value == null ? indexAutomaton : Automatons.unionAndDeterminize(List.of(value, indexAutomaton)) ); if (combine) { List> combinedAutomatons = new ArrayList<>(); @@ -714,7 +714,7 @@ private Map indexGroupAutomatons(boolean combine) { group.privilege().getAutomaton() ); if (Operations.isEmpty(intersectingPrivileges) == false) { - Automaton indexPatternAutomaton = Automatons.unionAndMinimize( + Automaton indexPatternAutomaton = Automatons.unionAndDeterminize( List.of(indexAndPrivilegeAutomatons.getValue(), indexAutomaton) ); combinedAutomatons.add(new Tuple<>(intersectingPrivileges, indexPatternAutomaton)); @@ -723,7 +723,7 @@ private Map indexGroupAutomatons(boolean combine) { combinedAutomatons.forEach( automatons -> allAutomatons.compute( automatons.v1(), - (key, value) -> value == null ? automatons.v2() : Automatons.unionAndMinimize(List.of(value, automatons.v2())) + (key, value) -> value == null ? 
automatons.v2() : Automatons.unionAndDeterminize(List.of(value, automatons.v2())) ) ); } @@ -768,7 +768,7 @@ public Group( this.indexNameMatcher = StringMatcher.of(indices).and(name -> restrictedIndices.isRestricted(name) == false); this.indexNameAutomaton = () -> indexNameAutomatonMemo.computeIfAbsent( indices, - k -> Automatons.minusAndMinimize(Automatons.patterns(indices), restrictedIndices.getAutomaton()) + k -> Automatons.minusAndDeterminize(Automatons.patterns(indices), restrictedIndices.getAutomaton()) ); } this.fieldPermissions = Objects.requireNonNull(fieldPermissions); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java index ea32ba13ae576..e4d283aba75a3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/LimitedRole.java @@ -212,7 +212,7 @@ public IsResourceAuthorizedPredicate allowedIndicesMatcher(String action) { public Automaton allowedActionsMatcher(String index) { final Automaton allowedMatcher = baseRole.allowedActionsMatcher(index); final Automaton limitedByMatcher = limitedByRole.allowedActionsMatcher(index); - return Automatons.intersectAndMinimize(allowedMatcher, limitedByMatcher); + return Automatons.intersectAndDeterminize(allowedMatcher, limitedByMatcher); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 7174b2f616c2a..f4df99dcefea4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -57,7 +57,7 @@ import static java.util.Map.entry; import static org.elasticsearch.xpack.core.security.support.Automatons.patterns; -import static org.elasticsearch.xpack.core.security.support.Automatons.unionAndMinimize; +import static org.elasticsearch.xpack.core.security.support.Automatons.unionAndDeterminize; /** * The name of an index related action always being with `indices:` followed by a sequence of slash-separated terms @@ -110,7 +110,7 @@ public final class IndexPrivilege extends Privilege { private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*", "indices:data/write/bulk*"); private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", TransportAutoPutMappingAction.TYPE.name()); private static final Automaton MONITOR_AUTOMATON = patterns("indices:monitor/*"); - private static final Automaton MANAGE_AUTOMATON = unionAndMinimize( + private static final Automaton MANAGE_AUTOMATON = unionAndDeterminize( Arrays.asList( MONITOR_AUTOMATON, patterns("indices:admin/*", TransportFieldCapabilitiesAction.NAME + "*", GetRollupIndexCapsAction.NAME + "*") @@ -303,7 +303,7 @@ private static IndexPrivilege resolve(Set name) { if (actions.isEmpty() == false) { automata.add(patterns(actions)); } - return new IndexPrivilege(name, unionAndMinimize(automata)); + return new IndexPrivilege(name, unionAndDeterminize(automata)); } static Map values() { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 0028508e87f32..5fb753ab55aab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -152,8 +152,11 @@ static RoleDescriptor kibanaSystem(String name) { // Data telemetry reads mappings, metadata and stats of indices RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("view_index_metadata", "monitor").build(), // Endpoint diagnostic information. Kibana reads from these indices to send - // telemetry - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.diagnostic.collection-*").privileges("read").build(), + // telemetry and also creates the index when policies are first created + RoleDescriptor.IndicesPrivileges.builder() + .indices(".logs-endpoint.diagnostic.collection-*") + .privileges("read", "create_index") + .build(), // Fleet secrets. Kibana can only write to this index. RoleDescriptor.IndicesPrivileges.builder() .indices(".fleet-secrets*") @@ -277,17 +280,19 @@ static RoleDescriptor kibanaSystem(String name) { ) .build(), // Endpoint specific action responses. Kibana reads and writes (for third party - // agents) to the index - // to display action responses to the user. + // agents) to the index to display action responses to the user. + // `create_index`: is necessary in order to ensure that the DOT datastream index is + // created by Kibana in order to avoid errors on the Elastic Defend side when streaming + // documents to it. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.action.responses-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Endpoint specific actions. Kibana reads and writes to this index to track new // actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") - .privileges("auto_configure", "read", "write") + .privileges("auto_configure", "read", "write", "create_index") .build(), // Legacy Osquery manager specific action responses. Kibana reads from these to // display responses to the user. @@ -475,7 +480,7 @@ static RoleDescriptor kibanaSystem(String name) { RoleDescriptor.IndicesPrivileges.builder().indices(".slo-observability.*").privileges("all").build(), // Endpoint heartbeat. Kibana reads from these to determine metering/billing for // endpoints. - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read").build(), + RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.heartbeat-*").privileges("read", "create_index").build(), // For connectors telemetry. 
Will be removed once we switched to connectors API RoleDescriptor.IndicesPrivileges.builder().indices(".elastic-connectors*").privileges("read").build() }, null, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java index 201cb4b69e472..d3790ea64ba4b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/Automatons.java @@ -112,7 +112,7 @@ public static Automaton patterns(Collection patterns) { private static Automaton buildAutomaton(Collection patterns) { if (patterns.size() == 1) { - return minimize(pattern(patterns.iterator().next())); + return determinize(pattern(patterns.iterator().next())); } final Function, Automaton> build = strings -> { @@ -121,7 +121,7 @@ private static Automaton buildAutomaton(Collection patterns) { final Automaton patternAutomaton = pattern(pattern); automata.add(patternAutomaton); } - return unionAndMinimize(automata); + return unionAndDeterminize(automata); }; // We originally just compiled each automaton separately and then unioned them all. @@ -188,7 +188,7 @@ private static Automaton buildAutomaton(Collection patterns) { if (misc.isEmpty() == false) { automata.add(build.apply(misc)); } - return unionAndMinimize(automata); + return unionAndDeterminize(automata); } /** @@ -277,22 +277,22 @@ static Automaton wildcard(String text) { return Operations.determinize(concatenate(automata), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT); } - public static Automaton unionAndMinimize(Collection automata) { + public static Automaton unionAndDeterminize(Collection automata) { Automaton res = automata.size() == 1 ? 
automata.iterator().next() : union(automata); - return minimize(res); + return determinize(res); } - public static Automaton minusAndMinimize(Automaton a1, Automaton a2) { + public static Automaton minusAndDeterminize(Automaton a1, Automaton a2) { Automaton res = minus(a1, a2, maxDeterminizedStates); - return minimize(res); + return determinize(res); } - public static Automaton intersectAndMinimize(Automaton a1, Automaton a2) { + public static Automaton intersectAndDeterminize(Automaton a1, Automaton a2) { Automaton res = intersection(a1, a2); - return minimize(res); + return determinize(res); } - private static Automaton minimize(Automaton automaton) { + private static Automaton determinize(Automaton automaton) { return Operations.determinize(automaton, maxDeterminizedStates); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java index 42fc7c196bbcf..7b0bd8a8108e9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/put/UpdateWatcherSettingsAction.java @@ -16,13 +16,13 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.UpdateForV9; import java.io.IOException; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; public class UpdateWatcherSettingsAction extends ActionType<AcknowledgedResponse> { @@ -34,6 +34,16 @@ public class UpdateWatcherSettingsAction extends ActionType<AcknowledgedResponse> { + public static final Set<String> ALLOWED_SETTINGS_PREFIXES = Set.of( + IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX, + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX, + IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + ); + + public static final Set<String> EXPLICITLY_DENIED_SETTINGS = Set.of( + IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_PREFIX + "._tier_preference" + ); + public UpdateWatcherSettingsAction() { super(NAME); } @@ -79,13 +89,25 @@ public Map<String, Object> settings() { @Override public ActionRequestValidationException validate() { - Set<String> forbiddenSettings = Sets.difference(settings.keySet(), ALLOWED_SETTING_KEYS); - if (forbiddenSettings.size() > 0) { + Set<String> forbiddenSettings = settings.keySet() + .stream() + .filter( + setting -> (ALLOWED_SETTING_KEYS.contains(setting) == false + && ALLOWED_SETTINGS_PREFIXES.stream().noneMatch(prefix -> setting.startsWith(prefix + "."))) + || EXPLICITLY_DENIED_SETTINGS.contains(setting) + ) + .collect(Collectors.toSet()); + + if (forbiddenSettings.isEmpty() == false) { return ValidateActions.addValidationError( "illegal settings: " + forbiddenSettings + ", these settings may not be configured. 
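// The Automatons rename above reflects that the security permission code only ever needed
// deterministic automatons for its set algebra, not minimal ones (Lucene 10 drops minimization).
// A hedged sketch of the union + determinize + subsetOf flow that the permission classes in this
// PR rely on, using plain Lucene automaton APIs (the action strings are illustrative):
import java.util.List;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;

final class AutomatonDemo {
    public static void main(String[] args) {
        Automaton a = Automata.makeString("indices:data/read/get");
        Automaton b = Automata.makeString("indices:data/read/search");
        // a union may be nondeterministic; determinize (with a work limit) before set algebra
        Automaton granted = Operations.determinize(Operations.union(List.of(a, b)), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
        Automaton wanted = Operations.determinize(Automata.makeString("indices:data/read/get"), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT);
        // subset checks need deterministic inputs, which is all the renamed helpers guarantee
        System.out.println(Operations.subsetOf(wanted, granted)); // true
    }
}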
Only the following settings may be configured: " - + ALLOWED_SETTING_KEYS, + + ALLOWED_SETTING_KEYS + + ", " + + ALLOWED_SETTINGS_PREFIXES.stream().map(s -> s + ".*").collect(Collectors.toSet()) + + " excluding the following explicitly denied settings: " + + EXPLICITLY_DENIED_SETTINGS, null ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java index 730d994fc5e35..46fc8a36c2c2b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentRequestTests.java @@ -67,7 +67,7 @@ public static Request createRandom() { request.setNumberOfAllocations(randomIntBetween(1, 8)); } if (randomBoolean()) { - request.setQueueCapacity(randomIntBetween(1, 1000000)); + request.setQueueCapacity(randomIntBetween(1, 100_000)); } if (randomBoolean()) { request.setPriority(randomFrom(Priority.values()).toString()); @@ -168,7 +168,7 @@ public void testValidate_GivenQueueCapacityIsNegative() { public void testValidate_GivenQueueCapacityIsAtLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_000); + request.setQueueCapacity(100_000); ActionRequestValidationException e = request.validate(); @@ -177,12 +177,12 @@ public void testValidate_GivenQueueCapacityIsAtLimit() { public void testValidate_GivenQueueCapacityIsOverLimit() { Request request = createRandom(); - request.setQueueCapacity(1_000_001); + request.setQueueCapacity(100_001); ActionRequestValidationException e = request.validate(); assertThat(e, is(not(nullValue()))); - assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 1000000")); + assertThat(e.getMessage(), containsString("[queue_capacity] must be less than 100000")); } public void testValidate_GivenTimeoutIsNegative() { @@ -234,6 +234,6 @@ public void testDefaults() { assertThat(request.getNumberOfAllocations(), nullValue()); assertThat(request.computeNumberOfAllocations(), equalTo(1)); assertThat(request.getThreadsPerAllocation(), equalTo(1)); - assertThat(request.getQueueCapacity(), equalTo(1024)); + assertThat(request.getQueueCapacity(), equalTo(10_000)); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 26b306d6f1334..a71ac6a9b51fd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -801,7 +801,7 @@ public void testKibanaSystemRole() { assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(mockIndexAbstraction(index)), is(true)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(mockIndexAbstraction(index)), - is(false) + is(true) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(mockIndexAbstraction(index)), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(mockIndexAbstraction(index)), is(false)); @@ -949,7 +949,7 @@ public void 
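// The validate() method above widens the watcher-settings allow list from exact keys to routing
// allocation prefixes while still pinning down one explicitly denied key. A self-contained sketch
// of that allow/prefix/deny filter; the literal setting names stand in for the IndexMetadata
// constants and ALLOWED_SETTING_KEYS used by the real action:
import java.util.Set;
import java.util.stream.Collectors;

final class SettingsFilterDemo {
    static final Set<String> ALLOWED_KEYS = Set.of("index.number_of_replicas", "index.auto_expand_replicas");
    static final Set<String> ALLOWED_PREFIXES = Set.of(
        "index.routing.allocation.include",
        "index.routing.allocation.exclude",
        "index.routing.allocation.require"
    );
    static final Set<String> DENIED = Set.of("index.routing.allocation.include._tier_preference");

    static Set<String> forbidden(Set<String> requested) {
        return requested.stream()
            .filter(
                s -> (ALLOWED_KEYS.contains(s) == false
                    && ALLOWED_PREFIXES.stream().noneMatch(p -> s.startsWith(p + "."))) || DENIED.contains(s)
            )
            .collect(Collectors.toSet());
    }

    public static void main(String[] args) {
        // the rack include passes via prefix; _tier_preference is caught by the deny list
        System.out.println(forbidden(Set.of("index.routing.allocation.include.rack", "index.routing.allocation.include._tier_preference")));
    }
}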
testKibanaSystemRole() { ); }); - // read-only index for Endpoint and Osquery manager specific action responses + // Elastic Defend internal index for response actions results Arrays.asList(".logs-endpoint.action.responses-" + randomAlphaOfLength(randomIntBetween(0, 13))).forEach((index) -> { final IndexAbstraction indexAbstraction = mockIndexAbstraction(index); assertThat(kibanaRole.indices().allowedIndicesMatcher("indices:foo").test(indexAbstraction), is(false)); @@ -959,10 +959,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1069,10 +1066,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1097,10 +1091,7 @@ public void testKibanaSystemRole() { is(false) ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); - assertThat( - kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) - ); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1319,12 +1310,21 @@ public void testKibanaSystemRole() { final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") || indexName.startsWith(".logs-endpoint.action.responses-"); + + final boolean isAlsoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") + || indexName.startsWith(".logs-endpoint.action.responses-") + || indexName.startsWith(".logs-endpoint.diagnostic.collection-") + || indexName.startsWith(".logs-endpoint.heartbeat-"); + assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportCreateIndexAction.TYPE.name()).test(indexAbstraction), - is(false) + is(isAlsoCreateIndex) + ); + 
assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoCreateIndex)); + assertThat( + kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), + is(isAlsoCreateIndex) ); - assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(false)); assertThat( kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex) diff --git a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle index 61aa2927e46de..6b1c7e42c0fde 100644 --- a/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/downsample/qa/mixed-cluster/build.gradle @@ -5,7 +5,7 @@ * 2.0. */ -import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -26,7 +26,7 @@ restResources { } def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter("8.10.0"); + return bwcVersion.onOrAfter("8.10.0") && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java index 3c5bf2c18c915..b8ea7658a8247 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/blockhash/BytesRefBlockHash.java @@ -8,12 +8,9 @@ package org.elasticsearch.compute.aggregation.blockhash; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; -import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; @@ -30,8 +27,6 @@ import org.elasticsearch.compute.operator.mvdedupe.MultivalueDedupeInt; import org.elasticsearch.core.ReleasableIterator; -import java.io.IOException; - /** * Maps a {@link BytesRefBlock} column to group ids. * This class is generated. Do not edit it. @@ -197,26 +192,21 @@ public BytesRefBlock[] getKeys() { * without and still read from the block. */ // TODO replace with takeBytesRefsOwnership ?! 
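The hunk that follows (mirrored for generated types in `X-BlockHash.java.st` further down) rewrites `getKeys()`: instead of serializing the whole `BytesRefArray` through a `BytesStreamOutput` and reading it back, which buffered every key in one unbounded allocation, it appends keys one at a time into a tracked block builder while reusing a single spare `BytesRef`. A self-contained sketch of the pattern, with a hypothetical helper name:

```java
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.util.BytesRefHash;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.BytesRefBlock;

// Sketch of the new copy strategy in getKeys(); copyKeys is a hypothetical
// name, the loop mirrors the diff. hash.get(i, spare) fills `spare` with the
// i-th key's bytes, so there is no per-key allocation and the copied bytes
// are accounted to the builder rather than to an intermediate stream.
static BytesRefBlock copyKeys(BlockFactory blockFactory, BytesRefHash hash) {
    final BytesRef spare = new BytesRef();
    try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size()))) {
        for (long i = 0; i < hash.size(); i++) {
            builder.appendBytesRef(hash.get(i, spare));
        }
        return builder.build();
    }
}
```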
+ final BytesRef spare = new BytesRef(); if (seenNull) { try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size() + 1))) { builder.appendNull(); - BytesRef spare = new BytesRef(); for (long i = 0; i < hash.size(); i++) { builder.appendBytesRef(hash.get(i, spare)); } return new BytesRefBlock[] { builder.build() }; } } - - final int size = Math.toIntExact(hash.size()); - try (BytesStreamOutput out = new BytesStreamOutput()) { - hash.getBytesRefs().writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new BytesRefBlock[] { - blockFactory.newBytesRefArrayVector(new BytesRefArray(in, BigArrays.NON_RECYCLING_INSTANCE), size).asBlock() }; + try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size()))) { + for (long i = 0; i < hash.size(); i++) { + builder.appendBytesRef(hash.get(i, spare)); } - } catch (IOException e) { - throw new IllegalStateException(e); + return new BytesRefBlock[] { builder.build() }; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java index 3e38b6d6fe9fa..8d3dbf3164c47 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunction.java @@ -97,7 +97,7 @@ public void addIntermediateInput(int positionOffset, IntVector groupIdVector, Pa @Override public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) { - next.addIntermediateRowInput(groupId, input, position); + next.addIntermediateRowInput(groupId, ((FilteredGroupingAggregatorFunction) input).next(), position); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st index 7c21cff56d7bb..2a3d1143236ac 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/X-BlockHash.java.st @@ -9,15 +9,10 @@ package org.elasticsearch.compute.aggregation.blockhash; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; $endif$ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BitArray; -$if(BytesRef)$ -import org.elasticsearch.common.util.BytesRefArray; -$endif$ import org.elasticsearch.common.util.$Hash$; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.aggregation.SeenGroupIds; @@ -58,8 +53,6 @@ $endif$ import org.elasticsearch.core.ReleasableIterator; $if(BytesRef)$ -import java.io.IOException; - $else$ import java.util.BitSet; @@ -250,26 +243,21 @@ $if(BytesRef)$ * without and still read from the block. */ // TODO replace with takeBytesRefsOwnership ?! 
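On the `FilteredGroupingAggregatorFunction` change a few hunks up (the template hunk below simply repeats the `BytesRefBlockHash` fix for generated code): `addIntermediateRowInput` used to forward the filtered wrapper itself to the inner function, which then read intermediate state from a mismatched implementation. The fix unwraps the incoming function first; condensed from the diff:

```java
// `next` is the wrapped, unfiltered function. When two filtered aggregators
// are combined row by row, each inner function must be handed the other
// side's inner function, never the wrapper around it.
@Override
public void addIntermediateRowInput(int groupId, GroupingAggregatorFunction input, int position) {
    next.addIntermediateRowInput(groupId, ((FilteredGroupingAggregatorFunction) input).next(), position);
}
```

The new `testAddIntermediateRowInput` added further down exercises exactly this path by comparing against results built through the traditional add mechanism.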
+ final BytesRef spare = new BytesRef(); if (seenNull) { try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size() + 1))) { builder.appendNull(); - BytesRef spare = new BytesRef(); for (long i = 0; i < hash.size(); i++) { builder.appendBytesRef(hash.get(i, spare)); } return new BytesRefBlock[] { builder.build() }; } } - - final int size = Math.toIntExact(hash.size()); - try (BytesStreamOutput out = new BytesStreamOutput()) { - hash.getBytesRefs().writeTo(out); - try (StreamInput in = out.bytes().streamInput()) { - return new BytesRefBlock[] { - blockFactory.newBytesRefArrayVector(new BytesRefArray(in, BigArrays.NON_RECYCLING_INSTANCE), size).asBlock() }; + try (var builder = blockFactory.newBytesRefBlockBuilder(Math.toIntExact(hash.size()))) { + for (long i = 0; i < hash.size(); i++) { + builder.appendBytesRef(hash.get(i, spare)); } - } catch (IOException e) { - throw new IllegalStateException(e); + return new BytesRefBlock[] { builder.build() }; } $else$ if (seenNull) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index ee747d98c26f8..74affb10eaf20 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -241,6 +241,12 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + if (rowStrideReaders.isEmpty()) { return; } @@ -259,7 +265,7 @@ private void loadFromSingleLeaf(Block[] blocks, int shard, int segment, BlockLoa } BlockLoaderStoredFieldsFromLeafLoader storedFields = new BlockLoaderStoredFieldsFromLeafLoader( storedFieldLoader.getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null + sourceLoader != null ? sourceLoader.leaf(ctx.reader(), null) : null ); for (int p = 0; p < docs.count(); p++) { int doc = docs.get(p); @@ -381,13 +387,18 @@ private void fieldsMoved(LeafReaderContext ctx, int shard) throws IOException { FieldWork field = fields[f]; rowStride[f] = field.rowStride(ctx); storedFieldsSpec = storedFieldsSpec.merge(field.loader.rowStrideStoredFieldSpec()); - storedFields = new BlockLoaderStoredFieldsFromLeafLoader( - StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), - storedFieldsSpec.requiresSource() ? shardContexts.get(shard).newSourceLoader.get().leaf(ctx.reader(), null) : null - ); - if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { - trackStoredFields(storedFieldsSpec, false); - } + } + SourceLoader sourceLoader = null; + if (storedFieldsSpec.requiresSource()) { + sourceLoader = shardContexts.get(shard).newSourceLoader.get(); + storedFieldsSpec = storedFieldsSpec.merge(new StoredFieldsSpec(true, false, sourceLoader.requiredStoredFields())); + } + storedFields = new BlockLoaderStoredFieldsFromLeafLoader( + StoredFieldLoader.fromSpec(storedFieldsSpec).getLoader(ctx, null), + sourceLoader != null ? 
sourceLoader.leaf(ctx.reader(), null) : null + ); + if (false == storedFieldsSpec.equals(StoredFieldsSpec.NO_REQUIREMENTS)) { + trackStoredFields(storedFieldsSpec, false); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 5e0e625abb914..7cf47bc7fed1c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -372,8 +372,8 @@ static final class OrdinalSegmentAggregator implements Releasable, SeenGroupIds } void addInput(IntVector docs, Page page) { + GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; try { - GroupingAggregatorFunction.AddInput[] prepared = new GroupingAggregatorFunction.AddInput[aggregators.size()]; for (int i = 0; i < prepared.length; i++) { prepared[i] = aggregators.get(i).prepareProcessPage(this, page); } @@ -392,7 +392,7 @@ void addInput(IntVector docs, Page page) { } catch (IOException e) { throw new UncheckedIOException(e); } finally { - page.releaseBlocks(); + Releasables.close(page::releaseBlocks, Releasables.wrap(prepared)); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java index da2c3502144db..35ecced470e01 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredAggregatorFunctionTests.java @@ -27,7 +27,6 @@ public class FilteredAggregatorFunctionTests extends AggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java index 87cb99bd0709f..26971dc927cd1 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/FilteredGroupingAggregatorFunctionTests.java @@ -11,12 +11,14 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.LongIntBlockSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Tuple; import org.junit.After; @@ -31,7 +33,6 @@ public 
class FilteredGroupingAggregatorFunctionTests extends GroupingAggregatorFunctionTestCase { private final List unclosed = Collections.synchronizedList(new ArrayList<>()); - // TODO some version of this test that applies across all aggs @Override protected AggregatorFunctionSupplier aggregatorFunction(List inputChannels) { return new FilteredAggregatorFunctionSupplier( @@ -104,6 +105,42 @@ protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { ); } + /** + * Tests {@link GroupingAggregator#addIntermediateRow} by building results using the traditional + * add mechanism and using {@link GroupingAggregator#addIntermediateRow} then asserting that they + * produce the same output. + */ + public void testAddIntermediateRowInput() { + DriverContext ctx = driverContext(); + AggregatorFunctionSupplier supplier = aggregatorFunction(channels(AggregatorMode.SINGLE)); + Block[] results = new Block[2]; + try ( + GroupingAggregatorFunction main = supplier.groupingAggregator(ctx); + GroupingAggregatorFunction leaf = supplier.groupingAggregator(ctx); + SourceOperator source = simpleInput(ctx.blockFactory(), 10); + ) { + Page p; + while ((p = source.getOutput()) != null) { + try ( + IntVector group = ctx.blockFactory().newConstantIntVector(0, p.getPositionCount()); + GroupingAggregatorFunction.AddInput addInput = leaf.prepareProcessPage(null, p) + ) { + addInput.add(0, group); + } finally { + p.releaseBlocks(); + } + } + main.addIntermediateRowInput(0, leaf, 0); + try (IntVector selected = ctx.blockFactory().newConstantIntVector(0, 1)) { + main.evaluateFinal(results, 0, selected, ctx); + leaf.evaluateFinal(results, 1, selected, ctx); + } + assertThat(results[0], equalTo(results[1])); + } finally { + Releasables.close(results); + } + } + @After public void checkUnclosed() { for (Exception tracker : unclosed) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index 9414e076a26e6..cb190dfffafb9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -89,14 +89,17 @@ protected final Operator.OperatorFactory simpleWithMode(AggregatorMode mode) { return simpleWithMode(mode, Function.identity()); } + protected List channels(AggregatorMode mode) { + return mode.isInputPartial() ? range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); + } + private Operator.OperatorFactory simpleWithMode( AggregatorMode mode, Function wrap ) { - List channels = mode.isInputPartial() ? 
range(1, 1 + aggregatorIntermediateBlockCount()).boxed().toList() : List.of(1); int emitChunkSize = between(100, 200); - AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels)); + AggregatorFunctionSupplier supplier = wrap.apply(aggregatorFunction(channels(mode))); if (randomBoolean()) { supplier = chunkGroups(emitChunkSize, supplier); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index aeea18e52da0f..088e791348840 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -1147,7 +1147,7 @@ public void testLongBytesRefHashWithMultiValuedFields() { } else { assertThat( ordsAndKeys.description, - equalTo("BytesRefLongBlockHash{keys=[BytesRefKey[channel=1], LongKey[channel=0]], entries=9, size=491b}") + equalTo("BytesRefLongBlockHash{keys=[BytesRefKey[channel=1], LongKey[channel=0]], entries=9, size=483b}") ); assertOrds( ordsAndKeys.ords, diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index e4223f03c3a03..fb47255e8d52e 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -1,5 +1,6 @@ import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.util.GradleUtils import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -27,7 +28,7 @@ GradleUtils.extendSourceSet(project, "javaRestTest", "yamlRestTest") // ESQL is available in 8.11 or later def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter(Version.fromString("8.11.0")); + return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv index 83a2f3cb1c281..029c3baf3cbfb 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv @@ -7,3 +7,4 @@ millis:date,nanos:date_nanos,num:long 2023-10-23T12:27:28.948Z,2023-10-23T12:27:28.948000000Z,1698064048948000000 2023-10-23T12:15:03.360Z,2023-10-23T12:15:03.360103847Z,1698063303360103847 1999-10-23T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-02-23T13:33:34.937193000Z, 2023-01-23T13:55:01.543123456Z], 0 +1999-10-22T12:15:03.360Z,[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z], 0 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec index ad7149b0f742f..515e2c9c6587f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date_nanos.csv-spec @@ -38,9 +38,10 @@ nanos:date_nanos mv_min on date nanos required_capability: date_nanos_type -FROM date_nanos | SORT millis ASC | 
EVAL nanos = MV_MIN(nanos) | KEEP nanos | LIMIT 1; +FROM date_nanos | SORT millis ASC | WHERE millis < "2000-01-01" | EVAL nanos = MV_MIN(nanos) | KEEP nanos; nanos:date_nanos +2023-03-23T12:15:03.360103847Z 2023-01-23T13:55:01.543123456Z ; @@ -56,9 +57,10 @@ ct:integer mv_first on date nanos required_capability: date_nanos_type -FROM date_nanos | SORT millis ASC | EVAL nanos = MV_FIRST(nanos) | KEEP nanos | LIMIT 1; +FROM date_nanos | SORT millis ASC | WHERE millis < "2000-01-01" | EVAL nanos = MV_FIRST(nanos) | KEEP nanos; nanos:date_nanos +2023-03-23T12:15:03.360103847Z 2023-01-23T13:55:01.543123456Z ; @@ -263,3 +265,69 @@ ROW a = TO_DATE_NANOS(null), b = TO_DATE_NANOS(null + 1::long), c = TO_DATE_NANO a:date_nanos | b:date_nanos | c:date_nanos null | null | null ; + +Coalesce date nanos +required_capability: to_date_nanos + +ROW a = COALESCE(null, TO_DATE_NANOS(1698069301543123456)); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Case date nanos result +required_capability: to_date_nanos + +ROW a = CASE(false, TO_DATE_NANOS(0::long), TO_DATE_NANOS(1698069301543123456)); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Greatest date nanos +required_capability: least_greatest_for_datenanos + +ROW a = GREATEST(TO_DATE_NANOS("2023-10-23T13:55:01.543123456"), TO_DATE_NANOS("2023-10-23T13:53:55.832987654")); + +a:date_nanos +2023-10-23T13:55:01.543123456Z +; + +Least date nanos +required_capability: least_greatest_for_datenanos + +ROW a = LEAST(TO_DATE_NANOS("2023-10-23T13:55:01.543123456"), TO_DATE_NANOS("2023-10-23T13:53:55.832987654")); + +a:date_nanos +2023-10-23T13:53:55.832987654Z +; + +mv_dedup over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_DEDUPE(nanos) | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-01-23T13:55:01.543123456Z, 2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +2023-03-23T12:15:03.360103847Z +; + +mv_sort over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_SORT(nanos, "asc") | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-01-23T13:55:01.543123456Z, 2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] +; + +mv_slice over date nanos +required_capability: date_nanos_type + +FROM date_nanos | WHERE millis < "2000-01-01" | EVAL a = MV_SLICE(MV_SORT(nanos, "asc"), 1, 2) | SORT millis DESC | KEEP a; + +a:date_nanos +[2023-02-23T13:33:34.937193000Z, 2023-03-23T12:15:03.360103847Z] +[2023-03-23T12:15:03.360103847Z, 2023-03-23T12:15:03.360103847Z] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index ac4351413129e..2dc21a86e6394 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -2291,6 +2291,33 @@ m:integer |a:double |x:integer 74999 |48249.0 |0 ; +statsWithFilterOnGroups +required_capability: fix_filter_pushdown_past_stats +FROM employees +| STATS v = VALUES(emp_no) by job_positions | WHERE job_positions == "Accountant" | MV_EXPAND v | SORT v +; + +v:integer | job_positions:keyword + 10001 | Accountant + 10012 | Accountant + 10016 | Accountant + 10023 | Accountant + 10025 | Accountant + 10028 | Accountant + 10034 | Accountant + 10037 | Accountant + 10044 | Accountant + 10045 | Accountant +
10050 | Accountant + 10051 | Accountant + 10066 | Accountant + 10081 | Accountant + 10085 | Accountant + 10089 | Accountant + 10092 | Accountant + 10094 | Accountant +; + statsWithFiltering required_capability: per_agg_filtering @@ -2502,3 +2529,129 @@ FROM employees | eval x = [1,2,3], y = 5 + 6 | stats m = max(y) by y+1 m:integer | y+1:integer 11 | 12 ; + +filterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE salary > 0 +; + +max:integer +74999 +; + +filterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name == "" +; + +max:integer +null +; + +filterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL +; + +max:integer +70011 +; + +groupingFilterIsAlwaysTrue +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE salary > 0 BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | A +58121 | B +74999 | D +58715 | H +; + +groupingFilterIsAlwaysFalse +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name == "" BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | A +null | B +null | D +null | H +; + +groupingFilterSometimesMatches +required_capability: per_agg_filtering +FROM employees +| MV_EXPAND job_positions +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions = SUBSTRING(job_positions, 1, 1) +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +62233 | A +39878 | B +67492 | D +null | H +; + +groupingByOrdinalsFilterIsAlwaysTrue +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE salary > 0 BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +74970 | Accountant +69904 | Architect +58121 | Business Analyst +74999 | Data Scientist +; + +groupingByOrdinalsFilterIsAlwaysFalse +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name == "" BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +null | Accountant +null | Architect +null | Business Analyst +null | Data Scientist +; + +groupingByOrdinalsFilterSometimesMatches +required_capability: per_agg_filtering +required_capability: per_agg_filtering_ords +FROM employees +| STATS max = max(salary) WHERE first_name IS NULL BY job_positions +| SORT job_positions +| LIMIT 4 +; + +max:integer | job_positions:keyword +39878 | Accountant +62233 | Architect +39878 | Business Analyst +67492 | Data Scientist +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 5313e6630c75d..dd9d519649c01 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -1236,6 +1236,7 @@ off_on_holiday:keyword | back_home_again:keyword reverseGraphemeClusters required_capability: fn_reverse +required_capability: fn_reverse_grapheme_clusters ROW message = "áéíóúàèìòùâêîôû😊👍🏽🎉💖कंठाी" | EVAL message_reversed = 
REVERSE(message); message:keyword | message_reversed:keyword diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java index 3c39406198da3..dfca6ab2bf814 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlCapabilities.java @@ -32,6 +32,13 @@ public enum Cap { */ FN_REVERSE, + /** + * Support for reversing whole grapheme clusters. This is not supported + * on JDK versions less than 20 which are not supported in ES 9.0.0+ but this + * exists to keep the {@code 8.x} branch similar to the {@code main} branch. + */ + FN_REVERSE_GRAPHEME_CLUSTERS, + /** * Support for function {@code CBRT}. Done in #108574. */ @@ -133,7 +140,7 @@ public enum Cap { * - fixed variable shadowing * - fixed Join.references(), requiring breaking change to Join serialization */ - LOOKUP_V4(true), + LOOKUP_V4(Build.current().isSnapshot()), /** * Support for requesting the "REPEAT" command. @@ -279,7 +286,7 @@ public enum Cap { /** * Support for match operator */ - MATCH_OPERATOR(true), + MATCH_OPERATOR(Build.current().isSnapshot()), /** * Removing support for the {@code META} keyword. @@ -301,6 +308,11 @@ public enum Cap { */ TO_DATE_NANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** + * Support Least and Greatest functions on Date Nanos type + */ + LEAST_GREATEST_FOR_DATENANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG), + /** * Support for datetime in least and greatest functions */ @@ -349,7 +361,7 @@ public enum Cap { /** * Supported the text categorization function "CATEGORIZE". */ - CATEGORIZE(true), + CATEGORIZE(Build.current().isSnapshot()), /** * QSTR function @@ -375,7 +387,7 @@ public enum Cap { /** * Support named parameters for field names. */ - NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(true), + NAMED_PARAMETER_FOR_FIELD_AND_FUNCTION_NAMES(Build.current().isSnapshot()), /** * Fix sorting not allowed on _source and counters. @@ -387,6 +399,11 @@ public enum Cap { */ PER_AGG_FILTERING, + /** + * Fix {@link #PER_AGG_FILTERING} grouped by ordinals. + */ + PER_AGG_FILTERING_ORDS, + /** * Fix for https://github.com/elastic/elasticsearch/issues/114714 */ @@ -399,34 +416,39 @@ public enum Cap { /** * Support for semantic_text field mapping */ - SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG); + SEMANTIC_TEXT_TYPE(EsqlCorePlugin.SEMANTIC_TEXT_FEATURE_FLAG), + /** + * Fix for an optimization that caused wrong results + * https://github.com/elastic/elasticsearch/issues/115281 + */ + FIX_FILTER_PUSHDOWN_PAST_STATS, - private final boolean snapshotOnly; - private final FeatureFlag featureFlag; + /** + * This enables 60_usage.yml "Basic ESQL usage....snapshot" version test. See also the next capability. + */ + SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot()), + + /** + * This enables 60_usage.yml "Basic ESQL usage....non-snapshot" version test. See also the previous capability. 
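The `EsqlCapabilities` hunk that follows collapses the old `snapshotOnly`/`featureFlag` constructor pair into a single `enabled` flag computed once at construction time. A condensed sketch of the resulting shape (capability names taken from this diff, other members elided):

```java
// Condensed sketch; only the three constructor shapes matter. `enabled` is
// resolved once instead of being re-derived on every isEnabled() call.
public enum Cap {
    PER_AGG_FILTERING,                                     // always enabled
    MATCH_OPERATOR(Build.current().isSnapshot()),          // snapshot builds only
    TO_DATE_NANOS(EsqlCorePlugin.DATE_NANOS_FEATURE_FLAG); // gated by a feature flag

    private final boolean enabled;

    Cap() {
        this.enabled = true;
    }

    Cap(boolean enabled) {
        this.enabled = enabled;
    }

    Cap(FeatureFlag featureFlag) {
        this.enabled = featureFlag.isEnabled();
    }

    public boolean isEnabled() {
        return enabled;
    }
}
```

A side effect used by `CsvTests` below: `capabilities(true)` now also reports known-but-disabled capabilities, so a misspelled capability can be told apart from one that is merely disabled in this build and skipped with `assumeTrue` instead of failing.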
+ */ + NON_SNAPSHOT_TEST_FOR_TELEMETRY(Build.current().isSnapshot() == false); + + private final boolean enabled; Cap() { - this(false, null); + this.enabled = true; }; - Cap(boolean snapshotOnly) { - this(snapshotOnly, null); + Cap(boolean enabled) { + this.enabled = enabled; }; Cap(FeatureFlag featureFlag) { - this(false, featureFlag); - } - - Cap(boolean snapshotOnly, FeatureFlag featureFlag) { - assert featureFlag == null || snapshotOnly == false; - this.snapshotOnly = snapshotOnly; - this.featureFlag = featureFlag; + this.enabled = featureFlag.isEnabled(); } public boolean isEnabled() { - if (featureFlag == null) { - return Build.current().isSnapshot() || this.snapshotOnly == false; - } - return featureFlag.isEnabled(); + return enabled; } public String capabilityName() { @@ -434,12 +456,17 @@ public String capabilityName() { } } - public static final Set CAPABILITIES = capabilities(); + public static final Set CAPABILITIES = capabilities(false); - private static Set capabilities() { + /** + * Get a {@link Set} of all capabilities. If the {@code all} parameter is {@code false} + * then only enabled capabilities are returned - otherwise all + * known capabilities are returned. + */ + public static Set capabilities(boolean all) { List caps = new ArrayList<>(); for (Cap cap : Cap.values()) { - if (cap.isEnabled()) { + if (all || cap.isEnabled()) { caps.add(cap.capabilityName()); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java index d47ebeab4ca6c..aad2d37d414b8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Greatest.java @@ -43,7 +43,7 @@ public class Greatest extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Returns the maximum value from multiple columns. This is similar to <>\n" + "except it is intended to run on multiple columns at once.", note = "When run on `keyword` or `text` fields, this returns the last string in alphabetical order. " @@ -54,12 +54,12 @@ public Greatest( Source source, @Param( name = "first", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "First of the columns to evaluate." 
) Expression first, @Param( name = "rest", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "The rest of the columns to evaluate.", optional = true ) List rest @@ -152,7 +152,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.INTEGER) { return new GreatestIntEvaluator.Factory(source(), factories); } - if (dataType == DataType.LONG || dataType == DataType.DATETIME) { + if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) { return new GreatestLongEvaluator.Factory(source(), factories); } if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java index 81c1419dcf788..70ba9319385f3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Least.java @@ -43,7 +43,7 @@ public class Least extends EsqlScalarFunction implements OptionalArgument { private DataType dataType; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Returns the minimum value from multiple columns. " + "This is similar to <> except it is intended to run on multiple columns at once.", examples = @Example(file = "math", tag = "least") @@ -52,12 +52,12 @@ public Least( Source source, @Param( name = "first", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "First of the columns to evaluate." 
) Expression first, @Param( name = "rest", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "The rest of the columns to evaluate.", optional = true ) List rest @@ -151,7 +151,7 @@ public ExpressionEvaluator.Factory toEvaluator(ToEvaluator toEvaluator) { if (dataType == DataType.INTEGER) { return new LeastIntEvaluator.Factory(source(), factories); } - if (dataType == DataType.LONG || dataType == DataType.DATETIME) { + if (dataType == DataType.LONG || dataType == DataType.DATETIME || dataType == DataType.DATE_NANOS) { return new LeastLongEvaluator.Factory(source(), factories); } if (DataType.isString(dataType) || dataType == DataType.IP || dataType == DataType.VERSION || dataType == DataType.UNSUPPORTED) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java index b17ddddb422ce..34b89b4f78997 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupe.java @@ -38,6 +38,7 @@ public class MvDedupe extends AbstractMultivalueFunction { "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", @@ -60,6 +61,7 @@ public MvDedupe( "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java index a829b6f1417b9..ef562c339dfd9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSlice.java @@ -59,6 +59,7 @@ public class MvSlice extends EsqlScalarFunction implements OptionalArgument, Eva "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", @@ -87,6 +88,7 @@ public MvSlice( "cartesian_point", "cartesian_shape", "date", + "date_nanos", "double", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java index d9e41233952de..5ca5618bf2a54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSort.java @@ -69,7 +69,7 @@ public class MvSort extends EsqlScalarFunction implements OptionalArgument, Vali private static final String INVALID_ORDER_ERROR = "Invalid order value in [{}], expected one of [{}, {}] but got [{}]"; @FunctionInfo( - returnType = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + returnType = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = 
"Sorts a multivalued field in lexicographical order.", examples = @Example(file = "ints", tag = "mv_sort") ) @@ -77,7 +77,7 @@ public MvSort( Source source, @Param( name = "field", - type = { "boolean", "date", "double", "integer", "ip", "keyword", "long", "text", "version" }, + type = { "boolean", "date", "date_nanos", "double", "integer", "ip", "keyword", "long", "text", "version" }, description = "Multivalue expression. If `null`, the function returns `null`." ) Expression field, @Param( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java index 575bb085c41f7..6b9c8d0da025b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java @@ -53,6 +53,7 @@ public class Coalesce extends EsqlScalarFunction implements OptionalArgument { "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", @@ -73,6 +74,7 @@ public Coalesce( "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", @@ -90,6 +92,7 @@ public Coalesce( "boolean", "cartesian_point", "cartesian_shape", + "date_nanos", "date", "geo_point", "geo_shape", diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java index ed09d0bc16754..15e49c22a44db 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFilters.java @@ -15,8 +15,6 @@ import org.elasticsearch.xpack.esql.core.expression.Expressions; import org.elasticsearch.xpack.esql.core.expression.ReferenceAttribute; import org.elasticsearch.xpack.esql.core.expression.predicate.Predicates; -import org.elasticsearch.xpack.esql.expression.function.aggregate.AggregateFunction; -import org.elasticsearch.xpack.esql.plan.logical.Aggregate; import org.elasticsearch.xpack.esql.plan.logical.Enrich; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.Filter; @@ -38,18 +36,13 @@ protected LogicalPlan rule(Filter filter) { LogicalPlan child = filter.child(); Expression condition = filter.condition(); + // TODO: Push down past STATS if the filter is only on the groups; but take into account how `STATS ... BY field` handles + // multi-values: It seems to be equivalent to `EVAL field = MV_DEDUPE(field) | MV_EXPAND(field) | STATS ... BY field`, where the + // last `STATS ... BY field` can assume that `field` is single-valued (to be checked more thoroughly). + // https://github.com/elastic/elasticsearch/issues/115311 if (child instanceof Filter f) { // combine nodes into a single Filter with updated ANDed condition plan = f.with(Predicates.combineAnd(List.of(f.condition(), condition))); - } else if (child instanceof Aggregate agg) { // TODO: re-evaluate along with multi-value support - // Only push [parts of] a filter past an agg if these/it operates on agg's grouping[s], not output. 
- plan = maybePushDownPastUnary( - filter, - agg, - e -> e instanceof Attribute && agg.output().contains(e) && agg.groupings().contains(e) == false - || e instanceof AggregateFunction, - NO_OP - ); } else if (child instanceof Eval eval) { // Don't push if Filter (still) contains references to Eval's fields. // Account for simple aliases in the Eval, though - these shouldn't stop us. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index ce072e7b0a438..3119fd4b52153 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -257,7 +257,11 @@ public final void test() throws Throwable { assertThat( "Capability is not included in the enabled list capabilities on a snapshot build. Spelling mistake?", testCase.requiredCapabilities, - everyItem(in(EsqlCapabilities.CAPABILITIES)) + everyItem(in(EsqlCapabilities.capabilities(true))) + ); + assumeTrue( + "Capability not supported in this build", + EsqlCapabilities.capabilities(false).containsAll(testCase.requiredCapabilities) ); } else { for (EsqlCapabilities.Cap c : EsqlCapabilities.Cap.values()) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java index 1c917a961a343..db5d8e03458ea 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractAggregationTestCase.java @@ -163,15 +163,7 @@ private void aggregateSingleMode(Expression expression) { result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void aggregateGroupingSingleMode(Expression expression) { @@ -263,15 +255,7 @@ private void aggregateWithIntermediates(Expression expression) { result = extractResultFromAggregator(aggregator, PlannerUtils.toElementType(testCase.expectedType())); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void evaluate(Expression evaluableExpression) { @@ -288,15 +272,7 @@ private void evaluate(Expression evaluableExpression) { if (testCase.expectedType() == DataType.UNSIGNED_LONG && result != null) { result = NumericUtils.unsignedLongAsBigInteger((Long) 
result); } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } private void resolveExpression(Expression expression, Consumer onAggregator, Consumer onEvaluableExpression) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 84a41ef040c8e..c05f8e0990b3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -99,8 +99,10 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; /** @@ -722,17 +724,19 @@ public static void testFunctionInfo() { for (int i = 0; i < args.size() && i < types.size(); i++) { typesFromSignature.get(i).add(types.get(i).esNameIfPossible()); } - returnFromSignature.add(entry.getValue().esNameIfPossible()); + if (DataType.UNDER_CONSTRUCTION.containsKey(entry.getValue()) == false) { + returnFromSignature.add(entry.getValue().esNameIfPossible()); + } } for (int i = 0; i < args.size(); i++) { EsqlFunctionRegistry.ArgSignature arg = args.get(i); Set annotationTypes = Arrays.stream(arg.type()) - .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) .collect(Collectors.toCollection(TreeSet::new)); Set signatureTypes = typesFromSignature.get(i) .stream() - .filter(DataType.UNDER_CONSTRUCTION::containsKey) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) .collect(Collectors.toCollection(TreeSet::new)); if (signatureTypes.isEmpty()) { log.info("{}: skipping", arg.name()); @@ -746,10 +750,40 @@ public static void testFunctionInfo() { ); } - Set returnTypes = Arrays.stream(description.returnType()).collect(Collectors.toCollection(TreeSet::new)); + Set returnTypes = Arrays.stream(description.returnType()) + .filter(t -> DataType.UNDER_CONSTRUCTION.containsKey(DataType.fromNameOrAlias(t)) == false) + .collect(Collectors.toCollection(TreeSet::new)); assertEquals(returnFromSignature, returnTypes); } + /** + * Asserts the result of a test case matches the expected result and warnings. + *
<p>
+ * The {@code result} parameter should be an object as returned by {@link #toJavaObjectUnsignedLongAware}.
+ * </p>
+ */ + @SuppressWarnings("unchecked") + protected final void assertTestCaseResultAndWarnings(Object result) { + if (result instanceof Iterable) { + var collectionResult = (Iterable) result; + assertThat(collectionResult, not(hasItem(Double.NaN))); + assertThat(collectionResult, not(hasItem(Double.POSITIVE_INFINITY))); + assertThat(collectionResult, not(hasItem(Double.NEGATIVE_INFINITY))); + } + + assert testCase.getMatcher().matches(Double.NaN) == false; + assertThat(result, not(equalTo(Double.NaN))); + assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); + assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; + assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); + assertThat(result, testCase.getMatcher()); + + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); + } + } + protected final void assertTypeResolutionFailure(Expression expression) { assertTrue("expected unresolved", expression.typeResolved().unresolved()); assertThat(expression.typeResolved().message(), equalTo(testCase.getExpectedTypeError())); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java index 85db73901352b..65e8a53fc05c5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractScalarFunctionTestCase.java @@ -41,7 +41,6 @@ import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @@ -127,15 +126,7 @@ public final void testEvaluate() { result = toJavaObjectUnsignedLongAware(block, 0); } } - assertThat(result, not(equalTo(Double.NaN))); - assert testCase.getMatcher().matches(Double.POSITIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.POSITIVE_INFINITY))); - assert testCase.getMatcher().matches(Double.NEGATIVE_INFINITY) == false; - assertThat(result, not(equalTo(Double.NEGATIVE_INFINITY))); - assertThat(result, testCase.getMatcher()); - if (testCase.getExpectedWarnings() != null) { - assertWarnings(testCase.getExpectedWarnings()); - } + assertTestCaseResultAndWarnings(result); } /** diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java index e740533462746..7fe67707a7976 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/MultiRowTestCaseSupplier.java @@ -19,6 +19,7 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.List; +import java.util.function.Supplier; import static org.elasticsearch.test.ESTestCase.randomBoolean; import static org.elasticsearch.test.ESTestCase.randomList; @@ -37,56 +38,36 @@ public static List intCases(int minRows, int maxRows, int min List cases = new ArrayList<>(); if (0 <= max && 0 >= min && 
includeZero) { - cases.add(new TypedDataSupplier("<0 ints>", () -> randomList(minRows, maxRows, () -> 0), DataType.INTEGER, false, true)); + addSuppliers(cases, minRows, maxRows, "0 int", DataType.INTEGER, () -> 0); } if (max != 0) { - cases.add( - new TypedDataSupplier("<" + max + " ints>", () -> randomList(minRows, maxRows, () -> max), DataType.INTEGER, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " int", DataType.INTEGER, () -> max); } if (min != 0 && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " ints>", () -> randomList(minRows, maxRows, () -> min), DataType.INTEGER, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " int", DataType.INTEGER, () -> min); } int lower = Math.max(min, 1); int upper = Math.min(max, Integer.MAX_VALUE); if (lower < upper) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower, upper)), - DataType.INTEGER, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "positive int", DataType.INTEGER, () -> ESTestCase.randomIntBetween(lower, upper)); } int lower1 = Math.max(min, Integer.MIN_VALUE); int upper1 = Math.min(max, -1); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomIntBetween(lower1, upper1)), - DataType.INTEGER, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "negative int", DataType.INTEGER, () -> ESTestCase.randomIntBetween(lower1, upper1)); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random int", DataType.INTEGER, () -> { if (includeZero) { return ESTestCase.randomIntBetween(min, max); } return randomBoolean() ? 
ESTestCase.randomIntBetween(min, -1) : ESTestCase.randomIntBetween(1, max); - }), DataType.INTEGER, false, true)); + }); } return cases; @@ -96,56 +77,36 @@ public static List longCases(int minRows, int maxRows, long m List cases = new ArrayList<>(); if (0 <= max && 0 >= min && includeZero) { - cases.add(new TypedDataSupplier("<0 longs>", () -> randomList(minRows, maxRows, () -> 0L), DataType.LONG, false, true)); + addSuppliers(cases, minRows, maxRows, "0 long", DataType.LONG, () -> 0L); } if (max != 0) { - cases.add( - new TypedDataSupplier("<" + max + " longs>", () -> randomList(minRows, maxRows, () -> max), DataType.LONG, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " long", DataType.LONG, () -> max); } if (min != 0 && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " longs>", () -> randomList(minRows, maxRows, () -> min), DataType.LONG, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " long", DataType.LONG, () -> min); } long lower = Math.max(min, 1); long upper = Math.min(max, Long.MAX_VALUE); if (lower < upper) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower, upper)), - DataType.LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "positive long", DataType.LONG, () -> ESTestCase.randomLongBetween(lower, upper)); } long lower1 = Math.max(min, Long.MIN_VALUE); long upper1 = Math.min(max, -1); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(lower1, upper1)), - DataType.LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "negative long", DataType.LONG, () -> ESTestCase.randomLongBetween(lower1, upper1)); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random long", DataType.LONG, () -> { if (includeZero) { return ESTestCase.randomLongBetween(min, max); } return randomBoolean() ? 
ESTestCase.randomLongBetween(min, -1) : ESTestCase.randomLongBetween(1, max); - }), DataType.LONG, false, true)); + }); } return cases; @@ -156,29 +117,20 @@ public static List ulongCases(int minRows, int maxRows, BigIn // Zero if (BigInteger.ZERO.compareTo(max) <= 0 && BigInteger.ZERO.compareTo(min) >= 0 && includeZero) { - cases.add( - new TypedDataSupplier( - "<0 unsigned longs>", - () -> randomList(minRows, maxRows, () -> BigInteger.ZERO), - DataType.UNSIGNED_LONG, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "0 unsigned long", DataType.UNSIGNED_LONG, () -> BigInteger.ZERO); } // Small values, less than Long.MAX_VALUE BigInteger lower1 = min.max(BigInteger.ONE); BigInteger upper1 = max.min(BigInteger.valueOf(Long.MAX_VALUE)); if (lower1.compareTo(upper1) < 0) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomUnsignedLongBetween(lower1, upper1)), - DataType.UNSIGNED_LONG, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small unsigned long", + DataType.UNSIGNED_LONG, + () -> ESTestCase.randomUnsignedLongBetween(lower1, upper1) ); } @@ -186,14 +138,13 @@ public static List ulongCases(int minRows, int maxRows, BigIn BigInteger lower2 = min.max(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE)); BigInteger upper2 = max.min(ESTestCase.UNSIGNED_LONG_MAX); if (lower2.compareTo(upper2) < 0) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomUnsignedLongBetween(lower2, upper2)), - DataType.UNSIGNED_LONG, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big unsigned long", + DataType.UNSIGNED_LONG, + () -> ESTestCase.randomUnsignedLongBetween(lower2, upper2) ); } @@ -204,85 +155,77 @@ public static List doubleCases(int minRows, int maxRows, doub List cases = new ArrayList<>(); if (0d <= max && 0d >= min && includeZero) { - cases.add(new TypedDataSupplier("<0 doubles>", () -> randomList(minRows, maxRows, () -> 0d), DataType.DOUBLE, false, true)); - cases.add(new TypedDataSupplier("<-0 doubles>", () -> randomList(minRows, maxRows, () -> -0d), DataType.DOUBLE, false, true)); + addSuppliers(cases, minRows, maxRows, "0 double", DataType.DOUBLE, () -> 0d); + addSuppliers(cases, minRows, maxRows, "-0 double", DataType.DOUBLE, () -> -0d); } if (max != 0d) { - cases.add( - new TypedDataSupplier("<" + max + " doubles>", () -> randomList(minRows, maxRows, () -> max), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " double", DataType.DOUBLE, () -> max); } if (min != 0d && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " doubles>", () -> randomList(minRows, maxRows, () -> min), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " double", DataType.DOUBLE, () -> min); } double lower1 = Math.max(min, 0d); double upper1 = Math.min(max, 1d); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small positive double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower1, upper1, true) ); } double lower2 = Math.max(min, -1d); double upper2 = Math.min(max, 0d); if (lower2 < upper2) { - cases.add( - new TypedDataSupplier( - "", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), - DataType.DOUBLE, - false, - 
@@ -204,85 +155,77 @@ public static List<TypedDataSupplier> doubleCases(int minRows, int maxRows, double min, double max, boolean includeZero) { List<TypedDataSupplier> cases = new ArrayList<>(); if (0d <= max && 0d >= min && includeZero) { - cases.add(new TypedDataSupplier("<0 doubles>", () -> randomList(minRows, maxRows, () -> 0d), DataType.DOUBLE, false, true)); - cases.add(new TypedDataSupplier("<-0 doubles>", () -> randomList(minRows, maxRows, () -> -0d), DataType.DOUBLE, false, true)); + addSuppliers(cases, minRows, maxRows, "0 double", DataType.DOUBLE, () -> 0d); + addSuppliers(cases, minRows, maxRows, "-0 double", DataType.DOUBLE, () -> -0d); } if (max != 0d) { - cases.add( - new TypedDataSupplier("<" + max + " doubles>", () -> randomList(minRows, maxRows, () -> max), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, max + " double", DataType.DOUBLE, () -> max); } if (min != 0d && min != max) { - cases.add( - new TypedDataSupplier("<" + min + " doubles>", () -> randomList(minRows, maxRows, () -> min), DataType.DOUBLE, false, true) - ); + addSuppliers(cases, minRows, maxRows, min + " double", DataType.DOUBLE, () -> min); } double lower1 = Math.max(min, 0d); double upper1 = Math.min(max, 1d); if (lower1 < upper1) { - cases.add( - new TypedDataSupplier( - "<small positive doubles>", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small positive double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower1, upper1, true) ); } double lower2 = Math.max(min, -1d); double upper2 = Math.min(max, 0d); if (lower2 < upper2) { - cases.add( - new TypedDataSupplier( - "<small negative doubles>", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "small negative double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower2, upper2, true) ); } double lower3 = Math.max(min, 1d); double upper3 = Math.min(max, Double.MAX_VALUE); if (lower3 < upper3) { - cases.add( - new TypedDataSupplier( - "<big positive doubles>", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big positive double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower3, upper3, true) ); } double lower4 = Math.max(min, -Double.MAX_VALUE); double upper4 = Math.min(max, -1d); if (lower4 < upper4) { - cases.add( - new TypedDataSupplier( - "<big negative doubles>", - () -> randomList(minRows, maxRows, () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)), - DataType.DOUBLE, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "big negative double", + DataType.DOUBLE, + () -> ESTestCase.randomDoubleBetween(lower4, upper4, true) ); } if (min < 0 && max > 0) { - cases.add(new TypedDataSupplier("<random doubles>", () -> randomList(minRows, maxRows, () -> { + addSuppliers(cases, minRows, maxRows, "random double", DataType.DOUBLE, () -> { if (includeZero) { return ESTestCase.randomDoubleBetween(min, max, true); } return randomBoolean() ? ESTestCase.randomDoubleBetween(min, -1, true) : ESTestCase.randomDoubleBetween(1, max, true); - }), DataType.DOUBLE, false, true)); + }); } return cases; @@ -291,149 +234,126 @@ public static List<TypedDataSupplier> doubleCases(int minRows, int maxRows, double min, double max, boolean includeZero) { public static List<TypedDataSupplier> dateCases(int minRows, int maxRows) { List<TypedDataSupplier> cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "<1970-01-01T00:00:00Z dates>", - () -> randomList(minRows, maxRows, () -> 0L), - DataType.DATETIME, - false, - true - ) - ); + addSuppliers(cases, minRows, maxRows, "1970-01-01T00:00:00Z date", DataType.DATETIME, () -> 0L); - cases.add( - new TypedDataSupplier( - "<random dates>", - // 1970-01-01T00:00:00Z - 2286-11-20T17:46:40Z - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11)), - DataType.DATETIME, - false, - true - ) - ); + // 1970-01-01T00:00:00Z - 2286-11-20T17:46:40Z + addSuppliers(cases, minRows, maxRows, "random date", DataType.DATETIME, () -> ESTestCase.randomLongBetween(0, 10 * (long) 10e11)); - cases.add( - new TypedDataSupplier( - "<far future dates>", - // 2286-11-20T17:46:40Z - +292278994-08-17T07:12:55.807Z - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE)), - DataType.DATETIME, - false, - true - ) + // 2286-11-20T17:46:40Z - +292278994-08-17T07:12:55.807Z + addSuppliers( + cases, + minRows, + maxRows, + "far future date", + DataType.DATETIME, + () -> ESTestCase.randomLongBetween(10 * (long) 10e11, Long.MAX_VALUE) ); - cases.add( - new TypedDataSupplier( - "<near the end of time dates>", - // very close to +292278994-08-17T07:12:55.807Z, the maximum supported millis since epoch - () -> randomList(minRows, maxRows, () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE)), - DataType.DATETIME, - false, - true - ) + // Very close to +292278994-08-17T07:12:55.807Z, the maximum supported millis since epoch + addSuppliers( + cases, + minRows, + maxRows, + "near the end of time date", + DataType.DATETIME, + () -> ESTestCase.randomLongBetween(Long.MAX_VALUE / 100 * 99, Long.MAX_VALUE) ); return cases; }
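The bound 10 * (long) 10e11 in the dateCases comments above is 10^13 milliseconds (10e11 is the double literal 1e12), which is where the 2286-11-20T17:46:40Z cutoff comes from. A quick standalone check (plain Java):

import java.time.Instant;

class DateBoundCheck {
    public static void main(String[] args) {
        long bound = 10 * (long) 10e11; // 10 * 1_000_000_000_000 = 10^13 ms since epoch
        System.out.println(Instant.ofEpochMilli(bound));          // 2286-11-20T17:46:40Z
        System.out.println(Instant.ofEpochMilli(Long.MAX_VALUE)); // +292278994-08-17T07:12:55.807Z
    }
}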
public static List<TypedDataSupplier> booleanCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier("<true booleans>", () -> randomList(minRows, maxRows, () -> true), DataType.BOOLEAN, false, true), - new TypedDataSupplier("<false booleans>", () -> randomList(minRows, maxRows, () -> false), DataType.BOOLEAN, false, true), - new TypedDataSupplier( - "<random booleans>", - () -> randomList(minRows, maxRows, ESTestCase::randomBoolean), - DataType.BOOLEAN, - false, - true - ) - ); + List<TypedDataSupplier> cases = new ArrayList<>(); + + addSuppliers(cases, minRows, maxRows, "true boolean", DataType.BOOLEAN, () -> true); + addSuppliers(cases, minRows, maxRows, "false boolean", DataType.BOOLEAN, () -> false); + addSuppliers(cases, minRows, maxRows, "random boolean", DataType.BOOLEAN, ESTestCase::randomBoolean); + + return cases; } public static List<TypedDataSupplier> ipCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier( - "<127.0.0.1 ips>", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1")))), - DataType.IP, - false, - true - ), - new TypedDataSupplier( - "<random v4 ips>", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(true)))), - DataType.IP, - false, - true - ), - new TypedDataSupplier( - "<random v6 ips>", - () -> randomList(minRows, maxRows, () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(false)))), - DataType.IP, - false, - true - ) + List<TypedDataSupplier> cases = new ArrayList<>(); + + addSuppliers( + cases, + minRows, + maxRows, + "127.0.0.1 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(InetAddresses.forString("127.0.0.1"))) + ); + addSuppliers( + cases, + minRows, + maxRows, + "random v4 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(true))) + ); + addSuppliers( + cases, + minRows, + maxRows, + "random v6 ip", + DataType.IP, + () -> new BytesRef(InetAddressPoint.encode(ESTestCase.randomIp(false))) ); + + return cases; } public static List<TypedDataSupplier> versionCases(int minRows, int maxRows) { - return List.of( - new TypedDataSupplier( - "<major versions>", - () -> randomList(minRows, maxRows, () -> new Version(Integer.toString(ESTestCase.between(0, 100))).toBytesRef()), - DataType.VERSION, - false, - true - ), - new TypedDataSupplier( - "<major.minor versions>", - () -> randomList( - minRows, - maxRows, - () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)).toBytesRef() - ), - DataType.VERSION, - false, - true - ), - new TypedDataSupplier( - "<major.minor.patch versions>", - () -> randomList( - minRows, - maxRows, - () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)) - .toBytesRef() - ), - DataType.VERSION, - false, - true - ) + List<TypedDataSupplier> cases = new ArrayList<>(); + + addSuppliers( + cases, + minRows, + maxRows, + "major version", + DataType.VERSION, + () -> new Version(Integer.toString(ESTestCase.between(0, 100))).toBytesRef() ); + addSuppliers( + cases, + minRows, + maxRows, + "major.minor version", + DataType.VERSION, + () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)).toBytesRef() + ); + addSuppliers( + cases, + minRows, + maxRows, + "major.minor.patch version", + DataType.VERSION, + () -> new Version(ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100) + "." + ESTestCase.between(0, 100)).toBytesRef() + ); + + return cases; }
public static List<TypedDataSupplier> geoPointCases(int minRows, int maxRows, boolean withAltitude) { List<TypedDataSupplier> cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "<geo_point>", - () -> randomList(minRows, maxRows, () -> GEO.asWkb(GeometryTestUtils.randomPoint(false))), - DataType.GEO_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "<geo_point>", + DataType.GEO_POINT, + () -> GEO.asWkb(GeometryTestUtils.randomPoint(false)) ); if (withAltitude) { - cases.add( - new TypedDataSupplier( - "<geo_point with altitude>", - () -> randomList(minRows, maxRows, () -> GEO.asWkb(GeometryTestUtils.randomPoint(false))), - DataType.GEO_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "<geo_point with altitude>", + DataType.GEO_POINT, + () -> GEO.asWkb(GeometryTestUtils.randomPoint(true)) ); } @@ -443,25 +363,23 @@ public static List<TypedDataSupplier> geoPointCases(int minRows, int maxRows, boolean withAltitude) { public static List<TypedDataSupplier> cartesianPointCases(int minRows, int maxRows, boolean withAltitude) { List<TypedDataSupplier> cases = new ArrayList<>(); - cases.add( - new TypedDataSupplier( - "<cartesian_point>", - () -> randomList(minRows, maxRows, () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(false))), - DataType.CARTESIAN_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "<cartesian_point>", + DataType.CARTESIAN_POINT, + () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(false)) ); if (withAltitude) { - cases.add( - new TypedDataSupplier( - "<cartesian_point with altitude>", - () -> randomList(minRows, maxRows, () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(true))), - DataType.CARTESIAN_POINT, - false, - true - ) + addSuppliers( + cases, + minRows, + maxRows, + "<cartesian_point with altitude>", + DataType.CARTESIAN_POINT, + () -> CARTESIAN.asWkb(ShapeTestUtils.randomPoint(true)) ); }
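Each addSuppliers call in these hunks replaces a hand-rolled pair of TypedDataSupplier registrations; the helper itself is added at the end of this file's diff and derives both a single-row and a multi-row test case from one named generator. A simplified, runnable sketch of that expansion (the String stand-in for TypedDataSupplier is an assumption for illustration only):

import java.util.ArrayList;
import java.util.List;

class AddSuppliersSketch {
    // Records only the generated case names; the real helper builds TypedDataSuppliers.
    static void addSuppliers(List<String> cases, int minRows, int maxRows, String name) {
        if (minRows <= 1 && maxRows >= 1) {
            cases.add("<single " + name + ">"); // one-element column
        }
        if (maxRows > 1) {
            cases.add("<" + name + "s>"); // column of Math.max(2, minRows) .. maxRows elements
        }
    }

    public static void main(String[] args) {
        List<String> cases = new ArrayList<>();
        addSuppliers(cases, 1, 1000, "random v4 ip");
        System.out.println(cases); // [<single random v4 ip>, <random v4 ips>]
    }
}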
@@ -471,59 +389,64 @@ public static List<TypedDataSupplier> cartesianPointCases(int minRows, int maxRows, boolean withAltitude) { public static List<TypedDataSupplier> stringCases(int minRows, int maxRows, DataType type) { List<TypedDataSupplier> cases = new ArrayList<>(); - cases.addAll( - List.of( - new TypedDataSupplier( - "<empty " + type + "s>", - () -> randomList(minRows, maxRows, () -> new BytesRef("")), - type, - false, - true - ), - new TypedDataSupplier( - "<short alpha " + type + "s>", - () -> randomList(minRows, maxRows, () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(1, 30))), - type, - false, - true - ), - new TypedDataSupplier( - "<short unicode " + type + "s>", - () -> randomList(minRows, maxRows, () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(1, 30))), - type, - false, - true - ) - ) + addSuppliers(cases, minRows, maxRows, "empty " + type, type, () -> new BytesRef("")); + addSuppliers( + cases, + minRows, + maxRows, + "short alpha " + type, + type, + () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(1, 30)) + ); + addSuppliers( + cases, + minRows, + maxRows, + "short unicode " + type, + type, + () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(1, 30)) ); if (minRows <= 100) { var longStringsMaxRows = Math.min(maxRows, 100); - cases.addAll( - List.of( - new TypedDataSupplier( - "<long alpha " + type + "s>", - () -> randomList(minRows, longStringsMaxRows, () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(300, 1000))), - type, - false, - true - ), - new TypedDataSupplier( - "<long unicode " + type + "s>", - () -> randomList( - minRows, - longStringsMaxRows, - () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(300, 1000)) - ), - type, - false, - true - ) - ) + addSuppliers( + cases, + minRows, + longStringsMaxRows, + "long alpha " + type, + type, + () -> new BytesRef(ESTestCase.randomAlphaOfLengthBetween(300, 1000)) + ); + addSuppliers( + cases, + minRows, + longStringsMaxRows, + "long unicode " + type, + type, + () -> new BytesRef(ESTestCase.randomRealisticUnicodeOfLengthBetween(300, 1000)) ); } return cases; } + + private static void addSuppliers( + List<TypedDataSupplier> cases, + int minRows, + int maxRows, + String name, + DataType type, + Supplier<Object> valueSupplier + ) { + if (minRows <= 1 && maxRows >= 1) { + cases.add(new TypedDataSupplier("<single " + name + ">", () -> randomList(1, 1, valueSupplier), type, false, true)); + } + + if (maxRows > 1) { + cases.add( + new TypedDataSupplier("<" + name + "s>", () -> randomList(Math.max(2, minRows), maxRows, valueSupplier), type, false, true) + ); + } + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java index 80737dac1aa58..ac599c7ff05f8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/aggregate/AvgTests.java @@ -64,25 +64,27 @@ protected Expression build(Source source, List<Expression> args) { private static TestCaseSupplier makeSupplier(TestCaseSupplier.TypedDataSupplier fieldSupplier) { return new TestCaseSupplier(List.of(fieldSupplier.type()), () -> { var fieldTypedData = fieldSupplier.get(); + var fieldData = fieldTypedData.multiRowData(); - Object expected = switch (fieldTypedData.type().widenSmallNumeric()) { - case INTEGER -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Integer) v) - .collect(Collectors.summarizingInt(Integer::intValue)) - .getAverage(); - case LONG -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Long) v) - .collect(Collectors.summarizingLong(Long::longValue)) - .getAverage(); - case DOUBLE -> fieldTypedData.multiRowData() - .stream() - .map(v -> (Double) v) - .collect(Collectors.summarizingDouble(Double::doubleValue)) - .getAverage(); - default -> throw new IllegalStateException("Unexpected value: " + fieldTypedData.type()); - }; + Object expected = null; + + if (fieldData.size() == 1) { + // For single elements, we directly return them to avoid precision issues + expected = ((Number) fieldData.get(0)).doubleValue(); + } else if (fieldData.size() > 1) { + expected = switch (fieldTypedData.type().widenSmallNumeric()) { + case INTEGER -> fieldData.stream() + .map(v -> (Integer) v) + .collect(Collectors.summarizingInt(Integer::intValue)) + .getAverage(); + case LONG -> fieldData.stream().map(v -> (Long) v).collect(Collectors.summarizingLong(Long::longValue)).getAverage(); + case DOUBLE -> fieldData.stream() + .map(v -> (Double) v) + .collect(Collectors.summarizingDouble(Double::doubleValue)) + .getAverage(); + default -> throw new IllegalStateException("Unexpected value: " + fieldTypedData.type()); + }; + } return new TestCaseSupplier.TestCase( List.of(fieldTypedData), diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index db3fce244c9a8..fbb7c691b1d94 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -46,6 +46,7 @@ public class CaseTests extends AbstractScalarFunctionTestCase { DataType.TEXT, DataType.BOOLEAN, DataType.DATETIME, +
DataType.DATE_NANOS, DataType.DOUBLE, DataType.INTEGER, DataType.LONG, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java index 311e3e3d89149..07d6ae34dc1e7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java @@ -115,6 +115,21 @@ public static Iterable parameters() { ) ) ); + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1727877348000123456L, DataType.DATE_NANOS, "a"), + new TestCaseSupplier.TypedData(1727790948000987654L, DataType.DATE_NANOS, "b") + ), + "GreatestLongEvaluator[values=[MvMax[field=Attribute[channel=0]], MvMax[field=Attribute[channel=1]]]]", + DataType.DATE_NANOS, + equalTo(1727877348000123456L) + ) + ) + ); return parameterSuppliersFromTypedData(anyNullIsNull(false, suppliers)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java index 69842fde90312..d95cc79dd22e0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java @@ -114,6 +114,21 @@ public static Iterable parameters() { ) ) ); + suppliers.add( + new TestCaseSupplier( + "(a, b)", + List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1727877348000123456L, DataType.DATE_NANOS, "a"), + new TestCaseSupplier.TypedData(1727790948000987654L, DataType.DATE_NANOS, "b") + ), + "LeastLongEvaluator[values=[MvMin[field=Attribute[channel=0]], MvMin[field=Attribute[channel=1]]]]", + DataType.DATE_NANOS, + equalTo(1727790948000987654L) + ) + ) + ); return parameterSuppliersFromTypedData(anyNullIsNull(false, suppliers)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java index e91a5cc1ebca4..485073d1a91d2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDateNanosTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.common.time.DateUtils; +import org.elasticsearch.xpack.esql.action.EsqlCapabilities; import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.type.DataType; @@ -29,6 +30,9 @@ public ToDateNanosTests(@Name("TestCase") Supplier te @ParametersFactory public static Iterable parameters() { + if (EsqlCapabilities.Cap.TO_DATE_NANOS.isEnabled() == false) { + return 
List.of(); + } final String read = "Attribute[channel=0]"; final List suppliers = new ArrayList<>(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java index d8d3b607efcc0..f3b44274f3ade 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java @@ -39,6 +39,7 @@ public static Iterable parameters() { booleans(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values)); bytesRefs(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values)); dateTimes(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); + dateNanos(cases, "mv_dedupe", "MvDedupe", DataType.DATE_NANOS, (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); doubles(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Double::valueOf))); ints(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Integer::valueOf))); longs(cases, "mv_dedupe", "MvDedupe", (size, values) -> getMatcher(values.mapToObj(Long::valueOf))); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java index e5bac422805af..859c79090d62f 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSliceTests.java @@ -182,6 +182,23 @@ private static void longs(List suppliers) { equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) ); })); + + suppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.INTEGER, DataType.INTEGER), () -> { + List field = randomList(1, 10, () -> randomLong()); + int length = field.size(); + int start = randomIntBetween(0, length - 1); + int end = randomIntBetween(start, length - 1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.DATE_NANOS, "field"), + new TestCaseSupplier.TypedData(start, DataType.INTEGER, "start"), + new TestCaseSupplier.TypedData(end, DataType.INTEGER, "end") + ), + "MvSliceLongEvaluator[field=Attribute[channel=0], start=Attribute[channel=1], end=Attribute[channel=2]]", + DataType.DATE_NANOS, + equalTo(start == end ? field.get(start) : field.subList(start, end + 1)) + ); + })); } private static void doubles(List suppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java index d07ed2aeae887..63f538059dddf 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSortTests.java @@ -110,6 +110,20 @@ private static void longs(List suppliers) { equalTo(field.size() == 1 ? 
field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) ); })); + + suppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.KEYWORD), () -> { + List field = randomList(1, 10, () -> randomLong()); + BytesRef order = new BytesRef("DESC"); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(field, DataType.DATE_NANOS, "field"), + new TestCaseSupplier.TypedData(order, DataType.KEYWORD, "order").forceLiteral() + ), + "MvSortLong[field=Attribute[channel=0], order=false]", + DataType.DATE_NANOS, + equalTo(field.size() == 1 ? field.iterator().next() : field.stream().sorted(Collections.reverseOrder()).toList()) + ); + })); } private static void doubles(List suppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java index c9b6de64e079d..797c99992815e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java @@ -96,6 +96,19 @@ public static Iterable parameters() { equalTo(firstDate == null ? secondDate : firstDate) ); })); + noNullsSuppliers.add(new TestCaseSupplier(List.of(DataType.DATE_NANOS, DataType.DATE_NANOS), () -> { + Long firstDate = randomBoolean() ? null : randomNonNegativeLong(); + Long secondDate = randomNonNegativeLong(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(firstDate, DataType.DATE_NANOS, "first"), + new TestCaseSupplier.TypedData(secondDate, DataType.DATE_NANOS, "second") + ), + "CoalesceEvaluator[values=[Attribute[channel=0], Attribute[channel=1]]]", + DataType.DATE_NANOS, + equalTo(firstDate == null ? 
secondDate : firstDate) + ); + })); List suppliers = new ArrayList<>(noNullsSuppliers); for (TestCaseSupplier s : noNullsSuppliers) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index 8d7c1997f78e3..ff7675504d6ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -738,6 +738,7 @@ public void testMultipleCombineLimits() { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastRefAgg() { // expected plan: "from test | where emp_no > 1 and emp_no < 3 | stats x = count(1) by emp_no | where x > 7" LogicalPlan plan = optimizedPlan(""" @@ -790,6 +791,7 @@ public void testNoPushDownOrFilterPastAgg() { assertTrue(stats.child() instanceof EsRelation); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivePushDownComplexFilterPastAgg() { // expected plan: from test | emp_no > 0 | stats x = count(1) by emp_no | where emp_no < 3 or x > 9 LogicalPlan plan = optimizedPlan(""" @@ -1393,13 +1395,15 @@ public void testPushDownLimitThroughMultipleSort_AfterMvExpand2() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#232],[MAX(salary{f}#233) AS max_s, first_name{f}#232]] - * \_Filter[ISNOTNULL(first_name{f}#232)] - * \_MvExpand[first_name{f}#232] - * \_TopN[[Order[emp_no{f}#231,ASC,LAST]],50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#231, first_name{f}#232, salary{f}#233] + * \_Filter[ISNOTNULL(first_name{r}#23)] + * \_Aggregate[STANDARD,[first_name{r}#23],[MAX(salary{f}#18,true[BOOLEAN]) AS max_s, first_name{r}#23]] + * \_MvExpand[first_name{f}#14,first_name{r}#23] + * \_TopN[[Order[emp_no{f}#13,ASC,LAST]],50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#19, emp_no{f}#13, first_name{f}#14, ..] 
*/ public void testDontPushDownLimitPastAggregate_AndMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1413,10 +1417,10 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { | limit 5"""); var limit = as(plan, Limit.class); + var filter = as(limit.child(), Filter.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - var mvExp = as(filter.child(), MvExpand.class); + var agg = as(filter.child(), Aggregate.class); + var mvExp = as(agg.child(), MvExpand.class); var topN = as(mvExp.child(), TopN.class); assertThat(topN.limit().fold(), equalTo(50)); assertThat(orderNames(topN), contains("emp_no")); @@ -1424,14 +1428,16 @@ public void testDontPushDownLimitPastAggregate_AndMvExpand() { } /** + * TODO: Push down the filter correctly https://github.com/elastic/elasticsearch/issues/115311 + * * Expected * Limit[5[INTEGER]] - * \_Aggregate[[first_name{f}#262],[MAX(salary{f}#263) AS max_s, first_name{f}#262]] - * \_Filter[ISNOTNULL(first_name{f}#262)] - * \_Limit[50[INTEGER]] - * \_MvExpand[first_name{f}#262] - * \_Limit[50[INTEGER]] - * \_EsRelation[employees][emp_no{f}#261, first_name{f}#262, salary{f}#263] + * \_Filter[ISNOTNULL(first_name{r}#22)] + * \_Aggregate[STANDARD,[first_name{r}#22],[MAX(salary{f}#17,true[BOOLEAN]) AS max_s, first_name{r}#22]] + * \_Limit[50[INTEGER]] + * \_MvExpand[first_name{f}#13,first_name{r}#22] + * \_Limit[50[INTEGER]] + * \_EsRelation[test][_meta_field{f}#18, emp_no{f}#12, first_name{f}#13, ..] */ public void testPushDown_TheRightLimit_PastMvExpand() { LogicalPlan plan = optimizedPlan(""" @@ -1445,9 +1451,9 @@ public void testPushDown_TheRightLimit_PastMvExpand() { var limit = as(plan, Limit.class); assertThat(limit.limit().fold(), equalTo(5)); - var agg = as(limit.child(), Aggregate.class); - var filter = as(agg.child(), Filter.class); - limit = as(filter.child(), Limit.class); + var filter = as(limit.child(), Filter.class); + var agg = as(filter.child(), Aggregate.class); + limit = as(agg.child(), Limit.class); assertThat(limit.limit().fold(), equalTo(50)); var mvExp = as(limit.child(), MvExpand.class); limit = as(mvExp.child(), Limit.class); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java index 49a738f4f4fa3..e159e5ed0bd7d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/rules/logical/PushDownAndCombineFiltersTests.java @@ -213,6 +213,7 @@ public void testPushDownLikeRlikeFilter() { // from ... | where a > 1 | stats count(1) by b | where count(1) >= 3 and b < 2 // => ... 
| where a > 1 and b < 2 | stats count(1) by b | where count(1) >= 3 + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/115311") public void testSelectivelyPushDownFilterPastFunctionAgg() { EsRelation relation = relation(); GreaterThan conditionA = greaterThanOf(getFieldAttribute("a"), ONE); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java index f81ebc25dc860..0bfb6e9e43b03 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java +++ b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/CreateFromDeploymentIT.java @@ -109,6 +109,55 @@ public void testModelIdDoesNotMatch() throws IOException { ); } + public void testNumAllocationsIsUpdated() throws IOException { + var modelId = "update_num_allocations"; + var deploymentId = modelId; + + CustomElandModelIT.createMlNodeTextExpansionModel(modelId, client()); + var response = startMlNodeDeploymemnt(modelId, deploymentId); + assertOkOrCreated(response); + + var inferenceId = "test_num_allocations_updated"; + var putModel = putModel(inferenceId, endpointConfig(deploymentId), TaskType.SPARSE_EMBEDDING); + var serviceSettings = putModel.get("service_settings"); + assertThat( + putModel.toString(), + serviceSettings, + is( + Map.of( + "num_allocations", + 1, + "num_threads", + 1, + "model_id", + "update_num_allocations", + "deployment_id", + "update_num_allocations" + ) + ) + ); + + assertOkOrCreated(updateMlNodeDeploymemnt(deploymentId, 2)); + + var updatedServiceSettings = getModel(inferenceId).get("service_settings"); + assertThat( + updatedServiceSettings.toString(), + updatedServiceSettings, + is( + Map.of( + "num_allocations", + 2, + "num_threads", + 1, + "model_id", + "update_num_allocations", + "deployment_id", + "update_num_allocations" + ) + ) + ); + } + private String endpointConfig(String deploymentId) { return Strings.format(""" { @@ -147,6 +196,20 @@ private Response startMlNodeDeploymemnt(String modelId, String deploymentId) thr return client().performRequest(request); } + private Response updateMlNodeDeploymemnt(String deploymentId, int numAllocations) throws IOException { + String endPoint = "/_ml/trained_models/" + deploymentId + "/deployment/_update"; + + var body = Strings.format(""" + { + "number_of_allocations": %d + } + """, numAllocations); + + Request request = new Request("POST", endPoint); + request.setJsonEntity(body); + return client().performRequest(request); + } + protected void stopMlNodeDeployment(String deploymentId) throws IOException { String endpoint = "/_ml/trained_models/" + deploymentId + "/deployment/_stop"; Request request = new Request("POST", endpoint); diff --git a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java index cbc50c361e3b5..37de2caadb475 100644 --- a/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java +++ 
b/x-pack/plugin/inference/qa/inference-service-tests/src/javaRestTest/java/org/elasticsearch/xpack/inference/InferenceCrudIT.java @@ -24,6 +24,7 @@ import java.util.stream.Stream; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalToIgnoringCase; import static org.hamcrest.Matchers.hasSize; @@ -326,4 +327,9 @@ public void testSupportedStream() throws Exception { deleteModel(modelId); } } + + public void testGetZeroModels() throws IOException { + var models = getModels("_all", TaskType.RERANK); + assertThat(models, empty()); + } } diff --git a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle index 1d5369468b054..0bc4813f25137 100644 --- a/x-pack/plugin/inference/qa/mixed-cluster/build.gradle +++ b/x-pack/plugin/inference/qa/mixed-cluster/build.gradle @@ -20,7 +20,7 @@ dependencies { // inference is available in 8.11 or later def supportedVersion = bwcVersion -> { - return bwcVersion.onOrAfter(Version.fromString("8.11.0")); + return bwcVersion.onOrAfter(Version.fromString("8.11.0")) && bwcVersion != VersionProperties.elasticsearchVersion } BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java index edcec45b50a16..01e663df4a3ea 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/action/TransportGetInferenceModelAction.java @@ -9,13 +9,13 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.inference.InferenceServiceRegistry; -import org.elasticsearch.inference.ModelConfigurations; +import org.elasticsearch.inference.Model; import org.elasticsearch.inference.TaskType; import org.elasticsearch.inference.UnparsedModel; import org.elasticsearch.injection.guice.Inject; @@ -29,8 +29,11 @@ import org.elasticsearch.xpack.inference.registry.ModelRegistry; import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; import java.util.List; import java.util.concurrent.Executor; +import java.util.stream.Collectors; public class TransportGetInferenceModelAction extends HandledTransportAction< GetInferenceModelAction.Request, @@ -96,39 +99,77 @@ private void getSingleModel( var model = service.get() .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()); - delegate.onResponse(new GetInferenceModelAction.Response(List.of(model.getConfigurations()))); + + service.get() + .updateModelsWithDynamicFields( + List.of(model), + delegate.delegateFailureAndWrap( + (l2, updatedModels) -> l2.onResponse( + new GetInferenceModelAction.Response( + 
updatedModels.stream().map(Model::getConfigurations).collect(Collectors.toList()) + ) + ) + ) + ); })); } private void getAllModels(boolean persistDefaultEndpoints, ActionListener<GetInferenceModelAction.Response> listener) { modelRegistry.getAllModels( persistDefaultEndpoints, - listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) + listener.delegateFailureAndWrap((l, models) -> executor.execute(() -> parseModels(models, listener))) ); } private void getModelsByTaskType(TaskType taskType, ActionListener<GetInferenceModelAction.Response> listener) { modelRegistry.getModelsByTaskType( taskType, - listener.delegateFailureAndWrap((l, models) -> executor.execute(ActionRunnable.supply(l, () -> parseModels(models)))) + listener.delegateFailureAndWrap((l, models) -> executor.execute(() -> parseModels(models, listener))) ); } - private GetInferenceModelAction.Response parseModels(List<UnparsedModel> unparsedModels) { - var parsedModels = new ArrayList<ModelConfigurations>(); + private void parseModels(List<UnparsedModel> unparsedModels, ActionListener<GetInferenceModelAction.Response> listener) { + if (unparsedModels.isEmpty()) { + listener.onResponse(new GetInferenceModelAction.Response(List.of())); + return; + } - for (var unparsedModel : unparsedModels) { - var service = serviceRegistry.getService(unparsedModel.service()); - if (service.isEmpty()) { - throw serviceNotFoundException(unparsedModel.service(), unparsedModel.inferenceEntityId()); + var parsedModelsByService = new HashMap<String, List<Model>>(); + try { + for (var unparsedModel : unparsedModels) { + var service = serviceRegistry.getService(unparsedModel.service()); + if (service.isEmpty()) { + throw serviceNotFoundException(unparsedModel.service(), unparsedModel.inferenceEntityId()); + } + var list = parsedModelsByService.computeIfAbsent(service.get().name(), s -> new ArrayList<>()); + list.add( + service.get() + .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()) + ); } - parsedModels.add( - service.get() - .parsePersistedConfig(unparsedModel.inferenceEntityId(), unparsedModel.taskType(), unparsedModel.settings()) - .getConfigurations() + + var groupedListener = new GroupedActionListener<List<Model>>( + parsedModelsByService.entrySet().size(), + listener.delegateFailureAndWrap((delegate, listOfListOfModels) -> { + var modifiable = new ArrayList<Model>(); + for (var l : listOfListOfModels) { + modifiable.addAll(l); + } + modifiable.sort(Comparator.comparing(Model::getInferenceEntityId)); + delegate.onResponse( + new GetInferenceModelAction.Response(modifiable.stream().map(Model::getConfigurations).collect(Collectors.toList())) + ); + }) ); + + for (var entry : parsedModelsByService.entrySet()) { + serviceRegistry.getService(entry.getKey()) + .get() // must be non-null to get this far + .updateModelsWithDynamicFields(entry.getValue(), groupedListener); + } + } catch (Exception e) { + listener.onFailure(e); } - return new GetInferenceModelAction.Response(parsedModels); } private ElasticsearchStatusException serviceNotFoundException(String service, String inferenceId) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java index 04a07eeb984ec..def52e97666f9 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettings.java
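The next two hunks give sentence and word chunking a bounded max_chunk_size (20-300 and 10-300 respectively) by switching to extractRequiredPositiveIntegerBetween, whose implementation is added to ServiceUtils later in this diff. A self-contained sketch of that validation shape (helper name and message wording here are illustrative, not the exact ServiceUtils code):

import java.util.ArrayList;
import java.util.List;

class BoundedSettingSketch {
    // Collects an error and returns null instead of throwing, mirroring the ValidationException pattern.
    static Integer extractBetween(Integer value, String name, int min, int max, List<String> errors) {
        if (value == null || value <= 0) {
            errors.add("[" + name + "] must be a positive integer");
            return null;
        }
        if (value < min) {
            errors.add("[" + name + "] must be greater than or equal to [" + min + "]");
            return null;
        }
        if (value > max) {
            errors.add("[" + name + "] must be less than or equal to [" + max + "]");
            return null;
        }
        return value;
    }

    public static void main(String[] args) {
        List<String> errors = new ArrayList<>();
        System.out.println(extractBetween(500, "max_chunk_size", 20, 300, errors)); // null
        System.out.println(errors); // one "less than or equal to [300]" error
    }
}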
@@ -29,6 +29,8 @@ public class SentenceBoundaryChunkingSettings implements ChunkingSettings { public static final String NAME = "SentenceBoundaryChunkingSettings"; private static final ChunkingStrategy STRATEGY = ChunkingStrategy.SENTENCE; + private static final int MAX_CHUNK_SIZE_LOWER_LIMIT = 20; + private static final int MAX_CHUNK_SIZE_UPPER_LIMIT = 300; private static final Set VALID_KEYS = Set.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), @@ -62,9 +64,11 @@ public static SentenceBoundaryChunkingSettings fromMap(Map map) ); } - Integer maxChunkSize = ServiceUtils.extractRequiredPositiveInteger( + Integer maxChunkSize = ServiceUtils.extractRequiredPositiveIntegerBetween( map, ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), + MAX_CHUNK_SIZE_LOWER_LIMIT, + MAX_CHUNK_SIZE_UPPER_LIMIT, ModelConfigurations.CHUNKING_SETTINGS, validationException ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java index 5b91e122b9c80..7fb0fdc91bf72 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettings.java @@ -28,6 +28,8 @@ public class WordBoundaryChunkingSettings implements ChunkingSettings { public static final String NAME = "WordBoundaryChunkingSettings"; private static final ChunkingStrategy STRATEGY = ChunkingStrategy.WORD; + private static final int MAX_CHUNK_SIZE_LOWER_LIMIT = 10; + private static final int MAX_CHUNK_SIZE_UPPER_LIMIT = 300; private static final Set VALID_KEYS = Set.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), @@ -56,9 +58,11 @@ public static WordBoundaryChunkingSettings fromMap(Map map) { ); } - Integer maxChunkSize = ServiceUtils.extractRequiredPositiveInteger( + Integer maxChunkSize = ServiceUtils.extractRequiredPositiveIntegerBetween( map, ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), + MAX_CHUNK_SIZE_LOWER_LIMIT, + MAX_CHUNK_SIZE_UPPER_LIMIT, ModelConfigurations.CHUNKING_SETTINGS, validationException ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java index bf74ca86a969a..0b2268a448c8a 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/external/http/StreamingHttpResultPublisher.java @@ -96,11 +96,10 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t try { var consumed = inputBuffer.consumeContent(contentDecoder); - var allBytes = new byte[consumed]; - inputBuffer.read(allBytes); - - // we can have empty bytes, don't bother sending them - if (allBytes.length > 0) { + // we could have read 0 bytes if the body was delayed getting in, we need to return out so apache can load the body/footer + if (consumed > 0) { + var allBytes = new byte[consumed]; + inputBuffer.read(allBytes); queue.offer(() -> { subscriber.onNext(new HttpResult(response, allBytes)); var currentBytesInQueue = 
bytesInQueue.updateAndGet(current -> Long.max(0, current - allBytes.length)); @@ -111,18 +110,17 @@ public void consumeContent(ContentDecoder contentDecoder, IOControl ioControl) t } } }); - } - // always check if totalByteSize > the configured setting in case the settings change - if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { - pauseProducer(ioControl); - } + // always check if totalByteSize > the configured setting in case the settings change + if (bytesInQueue.accumulateAndGet(allBytes.length, Long::sum) >= settings.getMaxResponseSize().getBytes()) { + pauseProducer(ioControl); + } - // always run in case we're waking up from a pause and need to start a new thread - taskRunner.requestNextRun(); + taskRunner.requestNextRun(); - if (listenerCalled.compareAndSet(false, true)) { - listener.onResponse(this); + if (listenerCalled.compareAndSet(false, true)) { + listener.onResponse(this); + } } } finally { inputBuffer.reset(); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java index fb18cfb4959c7..4c07516051287 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/mapper/SemanticTextFieldMapper.java @@ -617,8 +617,7 @@ private String generateInvalidQueryInferenceResultsMessage(StringBuilder baseMes @Override public BlockLoader blockLoader(MappedFieldType.BlockLoaderContext blContext) { SourceValueFetcher fetcher = SourceValueFetcher.toString(blContext.sourcePaths(name().concat(".text"))); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll(), sourceMode); + return new BlockSourceReader.BytesRefsBlockLoader(fetcher, BlockSourceReader.lookupMatchingAll()); } } diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java index 8bccf6e7d1022..342199dc51db8 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rank/textsimilarity/TextSimilarityRankRetrieverBuilder.java @@ -10,7 +10,6 @@ import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.common.ParsingException; import org.elasticsearch.features.NodeFeature; -import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.search.builder.PointInTimeBuilder; @@ -20,7 +19,6 @@ import org.elasticsearch.search.retriever.CompoundRetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverBuilder; import org.elasticsearch.search.retriever.RetrieverParserContext; -import org.elasticsearch.search.retriever.rankdoc.RankDocsQueryBuilder; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -158,33 +156,18 @@ protected RankDoc[] 
combineInnerRetrieverResults(List rankResults) { return textSimilarityRankDocs; } - @Override - public QueryBuilder explainQuery() { - // the original matching set of the TextSimilarityRank retriever is specified by its nested retriever - return new RankDocsQueryBuilder(rankDocs, new QueryBuilder[] { innerRetrievers.getFirst().retriever().explainQuery() }, true); - } - @Override protected SearchSourceBuilder createSearchSourceBuilder(PointInTimeBuilder pit, RetrieverBuilder retrieverBuilder) { var sourceBuilder = new SearchSourceBuilder().pointInTimeBuilder(pit) .trackTotalHits(false) .storedFields(new StoredFieldsContext(false)) .size(rankWindowSize); + // apply the pre-filters downstream once if (preFilterQueryBuilders.isEmpty() == false) { retrieverBuilder.getPreFilterQueryBuilders().addAll(preFilterQueryBuilders); } retrieverBuilder.extractToSearchSourceBuilder(sourceBuilder, true); - // apply the pre-filters - if (preFilterQueryBuilders.size() > 0) { - QueryBuilder query = sourceBuilder.query(); - BoolQueryBuilder newQuery = new BoolQueryBuilder(); - if (query != null) { - newQuery.must(query); - } - preFilterQueryBuilders.forEach(newQuery::filter); - sourceBuilder.query(newQuery); - } sourceBuilder.rankBuilder( new TextSimilarityRankBuilder(this.field, this.inferenceId, this.inferenceText, this.rankWindowSize, this.minScore) ); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java index c0e3c78b12f13..9e7f8712b4087 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/ServiceUtils.java @@ -435,6 +435,32 @@ public static Integer extractRequiredPositiveIntegerLessThanOrEqualToMax( return field; } + public static Integer extractRequiredPositiveIntegerBetween( + Map map, + String settingName, + int minValue, + int maxValue, + String scope, + ValidationException validationException + ) { + Integer field = extractRequiredPositiveInteger(map, settingName, scope, validationException); + + if (field != null && field < minValue) { + validationException.addValidationError( + ServiceUtils.mustBeGreaterThanOrEqualNumberErrorMessage(settingName, scope, field, minValue) + ); + return null; + } + if (field != null && field > maxValue) { + validationException.addValidationError( + ServiceUtils.mustBeLessThanOrEqualNumberErrorMessage(settingName, scope, field, maxValue) + ); + return null; + } + + return field; + } + public static Integer extractOptionalPositiveInteger( Map map, String settingName, diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java index d38def8dca47f..8b2969c39b7ba 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalModel.java @@ -21,7 +21,7 @@ public abstract class ElasticsearchInternalModel extends Model { - protected final ElasticsearchInternalServiceSettings internalServiceSettings; + protected ElasticsearchInternalServiceSettings internalServiceSettings; public 
ElasticsearchInternalModel( String inferenceEntityId, @@ -91,6 +91,10 @@ public ElasticsearchInternalServiceSettings getServiceSettings() { return (ElasticsearchInternalServiceSettings) super.getServiceSettings(); } + public void updateNumAllocations(Integer numAllocations) { + this.internalServiceSettings.setNumAllocations(numAllocations); + } + @Override public String toString() { return Strings.toString(this.getConfigurations()); diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java index 389a9fa369c21..6732e5719b897 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalService.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.inference.results.InferenceTextEmbeddingFloatResults; import org.elasticsearch.xpack.core.inference.results.RankedDocsResults; import org.elasticsearch.xpack.core.inference.results.SparseEmbeddingResults; +import org.elasticsearch.xpack.core.ml.action.GetDeploymentStatsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; @@ -56,6 +57,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -83,8 +85,8 @@ public class ElasticsearchInternalService extends BaseElasticsearchInternalServi ); public static final int EMBEDDING_MAX_BATCH_SIZE = 10; - public static final String DEFAULT_ELSER_ID = ".elser-2"; - public static final String DEFAULT_E5_ID = ".multi-e5-small"; + public static final String DEFAULT_ELSER_ID = ".elser-2-elasticsearch"; + public static final String DEFAULT_E5_ID = ".multilingual-e5-small-elasticsearch"; private static final Logger logger = LogManager.getLogger(ElasticsearchInternalService.class); private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ElasticsearchInternalService.class); @@ -786,11 +788,50 @@ public List defaultConfigIds() { ); } - /** - * Default configurations that can be out of the box without creating an endpoint first. 
- * @param defaultsListener Config listener - */ @Override + public void updateModelsWithDynamicFields(List models, ActionListener> listener) { + + if (models.isEmpty()) { + listener.onResponse(models); + return; + } + + var modelsByDeploymentIds = new HashMap(); + for (var model : models) { + assert model instanceof ElasticsearchInternalModel; + + if (model instanceof ElasticsearchInternalModel esModel) { + modelsByDeploymentIds.put(esModel.mlNodeDeploymentId(), esModel); + } else { + listener.onFailure( + new ElasticsearchStatusException( + "Cannot update model [{}] as it is not an Elasticsearch service model", + RestStatus.INTERNAL_SERVER_ERROR, + model.getInferenceEntityId() + ) + ); + return; + } + } + + String deploymentIds = String.join(",", modelsByDeploymentIds.keySet()); + client.execute( + GetDeploymentStatsAction.INSTANCE, + new GetDeploymentStatsAction.Request(deploymentIds), + ActionListener.wrap(stats -> { + for (var deploymentStats : stats.getStats().results()) { + var model = modelsByDeploymentIds.get(deploymentStats.getDeploymentId()); + model.updateNumAllocations(deploymentStats.getNumberOfAllocations()); + } + listener.onResponse(new ArrayList<>(modelsByDeploymentIds.values())); + }, e -> { + logger.warn("Get deployment stats failed, cannot update the endpoint's number of allocations", e); + // continue with the original response + listener.onResponse(models); + }) + ); + } + public void defaultConfigs(ActionListener> defaultsListener) { preferredModelVariantFn.accept(defaultsListener.delegateFailureAndWrap((delegate, preferredModelVariant) -> { if (PreferredModelVariant.LINUX_X86_OPTIMIZED.equals(preferredModelVariant)) { diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java index fedf48fb583a3..962c939146ef2 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceSettings.java @@ -39,7 +39,7 @@ public class ElasticsearchInternalServiceSettings implements ServiceSettings { public static final String DEPLOYMENT_ID = "deployment_id"; public static final String ADAPTIVE_ALLOCATIONS = "adaptive_allocations"; - private final Integer numAllocations; + private Integer numAllocations; private final int numThreads; private final String modelId; private final AdaptiveAllocationsSettings adaptiveAllocationsSettings; @@ -172,6 +172,10 @@ public ElasticsearchInternalServiceSettings(StreamInput in) throws IOException { : null; } + public void setNumAllocations(Integer numAllocations) { + this.numAllocations = numAllocations; + } + @Override public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.INFERENCE_ADAPTIVE_ALLOCATIONS)) { @@ -194,6 +198,10 @@ public String modelId() { return modelId; } + public String deloymentId() { + return modelId; + } + public Integer getNumAllocations() { return numAllocations; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java index 
5b9625073e6c6..235a3730ce4f6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsBuilderTests.java @@ -38,25 +38,27 @@ public void testValidChunkingSettingsMap() { } private Map, ChunkingSettings> chunkingSettingsMapToChunkingSettings() { - var maxChunkSize = randomNonNegativeInt(); - var overlap = randomIntBetween(1, maxChunkSize / 2); + var maxChunkSizeWordBoundaryChunkingSettings = randomIntBetween(10, 300); + var overlap = randomIntBetween(1, maxChunkSizeWordBoundaryChunkingSettings / 2); + var maxChunkSizeSentenceBoundaryChunkingSettings = randomIntBetween(20, 300); + return Map.of( Map.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingStrategy.WORD.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), - maxChunkSize, + maxChunkSizeWordBoundaryChunkingSettings, ChunkingSettingsOptions.OVERLAP.toString(), overlap ), - new WordBoundaryChunkingSettings(maxChunkSize, overlap), + new WordBoundaryChunkingSettings(maxChunkSizeWordBoundaryChunkingSettings, overlap), Map.of( ChunkingSettingsOptions.STRATEGY.toString(), ChunkingStrategy.SENTENCE.toString(), ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), - maxChunkSize + maxChunkSizeSentenceBoundaryChunkingSettings ), - new SentenceBoundaryChunkingSettings(maxChunkSize, 1) + new SentenceBoundaryChunkingSettings(maxChunkSizeSentenceBoundaryChunkingSettings, 1) ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java index 8373ae93354b1..2832c2f64e0e6 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/ChunkingSettingsTests.java @@ -21,11 +21,11 @@ public static ChunkingSettings createRandomChunkingSettings() { switch (randomStrategy) { case WORD -> { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); return new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); } case SENTENCE -> { - return new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), randomBoolean() ? 0 : 1); + return new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), randomBoolean() ? 
0 : 1); } default -> throw new IllegalArgumentException("Unsupported random strategy [" + randomStrategy + "]"); } @@ -38,13 +38,13 @@ public static Map<String, Object> createRandomChunkingSettingsMap() { switch (randomStrategy) { case WORD -> { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), maxChunkSize); chunkingSettingsMap.put(ChunkingSettingsOptions.OVERLAP.toString(), randomIntBetween(1, maxChunkSize / 2)); } case SENTENCE -> { - chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), randomNonNegativeInt()); + chunkingSettingsMap.put(ChunkingSettingsOptions.MAX_CHUNK_SIZE.toString(), randomIntBetween(20, 300)); } default -> { } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java index 5687ebc4dbae7..afce8c57e0350 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkerTests.java @@ -318,7 +318,8 @@ public void testChunkSplitLargeChunkSizesWithChunkingSettings() { } public void testInvalidChunkingSettingsProvided() { - ChunkingSettings chunkingSettings = new WordBoundaryChunkingSettings(randomNonNegativeInt(), randomNonNegativeInt()); + var maxChunkSize = randomIntBetween(10, 300); + ChunkingSettings chunkingSettings = new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); assertThrows(IllegalArgumentException.class, () -> { new SentenceBoundaryChunker().chunk(TEST_TEXT, chunkingSettings); }); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java index fe97d7eb3af54..47a1a116ba21e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/SentenceBoundaryChunkingSettingsTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.inference.ChunkingStrategy; import org.elasticsearch.test.AbstractWireSerializingTestCase; -import org.elasticsearch.test.ESTestCase; import java.io.IOException; import java.util.HashMap; @@ -28,14 +27,14 @@ public void testMaxChunkSizeNotProvided() { } public void testInvalidInputsProvided() { - var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomNonNegativeInt())); + var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomIntBetween(20, 300))); chunkingSettingsMap.put(randomAlphaOfLength(10), randomNonNegativeInt()); assertThrows(ValidationException.class, () -> { SentenceBoundaryChunkingSettings.fromMap(chunkingSettingsMap); }); } public void testValidInputsProvided() { - int maxChunkSize = randomNonNegativeInt(); + int maxChunkSize = randomIntBetween(20, 300); SentenceBoundaryChunkingSettings settings = SentenceBoundaryChunkingSettings.fromMap( buildChunkingSettingsMap(Optional.of(maxChunkSize)) ); @@ -59,12 +58,12 @@ protected Writeable.Reader<SentenceBoundaryChunkingSettings> instanceReader() { @Override protected 
SentenceBoundaryChunkingSettings createTestInstance() { - return new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), randomBoolean() ? 0 : 1); + return new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), randomBoolean() ? 0 : 1); } @Override protected SentenceBoundaryChunkingSettings mutateInstance(SentenceBoundaryChunkingSettings instance) throws IOException { - var chunkSize = randomValueOtherThan(instance.maxChunkSize, ESTestCase::randomNonNegativeInt); + var chunkSize = randomValueOtherThan(instance.maxChunkSize, () -> randomIntBetween(20, 300)); return new SentenceBoundaryChunkingSettings(chunkSize, instance.sentenceOverlap); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java index 08c0724f36270..ef643a4b36fdc 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkerTests.java @@ -136,7 +136,7 @@ public void testNumberOfChunksWithWordBoundaryChunkingSettings() { } public void testInvalidChunkingSettingsProvided() { - ChunkingSettings chunkingSettings = new SentenceBoundaryChunkingSettings(randomNonNegativeInt(), 0); + ChunkingSettings chunkingSettings = new SentenceBoundaryChunkingSettings(randomIntBetween(20, 300), 0); assertThrows(IllegalArgumentException.class, () -> { new WordBoundaryChunker().chunk(TEST_TEXT, chunkingSettings); }); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java index c5515f7bf0512..dd91a3c7a947e 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/chunking/WordBoundaryChunkingSettingsTests.java @@ -14,7 +14,6 @@ import java.io.IOException; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Optional; @@ -28,19 +27,20 @@ public void testMaxChunkSizeNotProvided() { public void testOverlapNotProvided() { assertThrows(ValidationException.class, () -> { - WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(randomNonNegativeInt()), Optional.empty())); + WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(randomIntBetween(10, 300)), Optional.empty())); }); } public void testInvalidInputsProvided() { - var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(randomNonNegativeInt()), Optional.of(randomNonNegativeInt())); + var maxChunkSize = randomIntBetween(10, 300); + var chunkingSettingsMap = buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(randomIntBetween(1, maxChunkSize / 2))); chunkingSettingsMap.put(randomAlphaOfLength(10), randomNonNegativeInt()); assertThrows(ValidationException.class, () -> { WordBoundaryChunkingSettings.fromMap(chunkingSettingsMap); }); } public void testOverlapGreaterThanHalfMaxChunkSize() { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); var overlap = randomIntBetween((maxChunkSize / 2) + 1, maxChunkSize); assertThrows(ValidationException.class, () -> { 
WordBoundaryChunkingSettings.fromMap(buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(overlap))); @@ -48,7 +48,7 @@ public void testOverlapGreaterThanHalfMaxChunkSize() { } public void testValidInputsProvided() { - int maxChunkSize = randomNonNegativeInt(); + int maxChunkSize = randomIntBetween(10, 300); int overlap = randomIntBetween(1, maxChunkSize / 2); WordBoundaryChunkingSettings settings = WordBoundaryChunkingSettings.fromMap( buildChunkingSettingsMap(Optional.of(maxChunkSize), Optional.of(overlap)) @@ -75,29 +75,14 @@ protected Writeable.Reader<WordBoundaryChunkingSettings> instanceReader() { @Override protected WordBoundaryChunkingSettings createTestInstance() { - var maxChunkSize = randomNonNegativeInt(); + var maxChunkSize = randomIntBetween(10, 300); return new WordBoundaryChunkingSettings(maxChunkSize, randomIntBetween(1, maxChunkSize / 2)); } @Override protected WordBoundaryChunkingSettings mutateInstance(WordBoundaryChunkingSettings instance) throws IOException { - var valueToMutate = randomFrom(List.of(ChunkingSettingsOptions.MAX_CHUNK_SIZE, ChunkingSettingsOptions.OVERLAP)); - var maxChunkSize = instance.maxChunkSize; - var overlap = instance.overlap; - - if (valueToMutate.equals(ChunkingSettingsOptions.MAX_CHUNK_SIZE)) { - while (maxChunkSize == instance.maxChunkSize) { - maxChunkSize = randomNonNegativeInt(); - } - - if (overlap > maxChunkSize / 2) { - overlap = randomIntBetween(1, maxChunkSize / 2); - } - } else if (valueToMutate.equals(ChunkingSettingsOptions.OVERLAP)) { - while (overlap == instance.overlap) { - overlap = randomIntBetween(1, maxChunkSize / 2); - } - } + var maxChunkSize = randomValueOtherThan(instance.maxChunkSize, () -> randomIntBetween(10, 300)); + var overlap = randomValueOtherThan(instance.overlap, () -> randomIntBetween(1, maxChunkSize / 2)); return new WordBoundaryChunkingSettings(maxChunkSize, overlap); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java index f23ea2aa414b2..7cfd231be39f3 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/InferenceEventsAssertion.java @@ -12,7 +12,6 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.inference.InferenceServiceResults; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentFactory; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; @@ -26,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import static org.elasticsearch.test.ESTestCase.fail; import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.CoreMatchers.is; @@ -47,7 +47,9 @@ public InferenceEventsAssertion hasFinishedStream() { } public InferenceEventsAssertion hasNoErrors() { - MatcherAssert.assertThat("Expected no errors from stream.", error, Matchers.nullValue()); + if (error != null) { + fail(error, "Expected no errors from stream."); + } return this; } @@ -66,7 +68,7 @@ public InferenceEventsAssertion hasErrorWithStatusCode(int statusCode) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected an underlying ElasticsearchStatusException."); + fail(error, "Expected an underlying 
ElasticsearchStatusException."); return this; } @@ -79,7 +81,7 @@ public InferenceEventsAssertion hasErrorContaining(String message) { } t = t.getCause(); } - ESTestCase.fail(error, "Expected exception to contain string: " + message); + fail(error, "Expected exception to contain string: " + message); return this; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java index ca48d5427d18b..e3df0f0b5a2e1 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/ServiceUtilsTests.java @@ -605,6 +605,60 @@ public void testExtractRequiredPositiveIntegerLessThanOrEqualToMax_AddsErrorWhen assertThat(validation.validationErrors().get(1), is("[scope] does not contain the required setting [not_key]")); } + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsBetweenMinAndMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 2, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, randomIntBetween(minValue + 1, maxValue - 1)); + } + + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMin() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 1, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, minValue); + } + + public void testExtractRequiredPositiveIntegerBetween_ReturnsValueWhenValueIsEqualToMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue + 1, minValue + 10); + testExtractRequiredPositiveIntegerBetween_Successful(minValue, maxValue, maxValue); + } + + private void testExtractRequiredPositiveIntegerBetween_Successful(int minValue, int maxValue, int actualValue) { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map<String, Object> map = modifiableMap(Map.of("key", actualValue)); + var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation); + + assertThat(validation.validationErrors(), hasSize(1)); + assertNotNull(parsedInt); + assertThat(parsedInt, is(actualValue)); + assertTrue(map.isEmpty()); + } + + public void testExtractRequiredIntBetween_AddsErrorForValueBelowMin() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue, minValue + 10); + testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, minValue - 1); + } + + public void testExtractRequiredIntBetween_AddsErrorForValueAboveMax() { + var minValue = randomNonNegativeInt(); + var maxValue = randomIntBetween(minValue, minValue + 10); + testExtractRequiredIntBetween_Unsuccessful(minValue, maxValue, maxValue + 1); + } + + private void testExtractRequiredIntBetween_Unsuccessful(int minValue, int maxValue, int actualValue) { + var validation = new ValidationException(); + validation.addValidationError("previous error"); + Map<String, Object> map = modifiableMap(Map.of("key", actualValue)); + var parsedInt = ServiceUtils.extractRequiredPositiveIntegerBetween(map, "key", minValue, maxValue, "scope", validation); + + assertThat(validation.validationErrors(), hasSize(2)); + assertNull(parsedInt); + assertTrue(map.isEmpty()); + assertThat(validation.validationErrors().get(1), 
containsString("Invalid value")); + } + public void testExtractOptionalEnum_ReturnsNull_WhenFieldDoesNotExist() { var validation = new ValidationException(); Map<String, Object> map = modifiableMap(Map.of("key", "value")); diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java index 8adf75b4c0a81..48277112d9306 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/anthropic/AnthropicServiceTests.java @@ -532,7 +532,6 @@ public void testInfer_SendsCompletionRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {"type": "message_start", "message": {"model": "claude, probably"}} @@ -578,7 +577,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ data: {"type": "error", "error": {"type": "request_too_large", "message": "blah"}} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java index 44b0d17d9b448..e85edf573ba96 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureaistudio/AzureAiStudioServiceTests.java @@ -1308,7 +1308,6 @@ public void testInfer_UnauthorisedResponse() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1364,7 +1363,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java index 900b666c0b8fb..3408fc358cac0 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/azureopenai/AzureOpenAiServiceTests.java @@ -1425,7 +1425,6 @@ private void testChunkedInfer(AzureOpenAiEmbeddingsModel model) throws IOExcepti } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1484,7 +1483,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException, URISy } } - @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java index cf114db45619f..758c38166778b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/cohere/CohereServiceTests.java @@ -1635,7 +1635,6 @@ public void testDefaultSimilarity() { assertEquals(SimilarityMeasure.DOT_PRODUCT, CohereService.defaultSimilarity()); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ {"event_type":"text-generation", "text":"hello"} @@ -1669,7 +1668,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { "event_type":"stream-end", "finish_reason":"ERROR", "response":{ "text": "how dare you" } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java index b82b8a08f2175..5ec66687752a8 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElasticsearchInternalServiceTests.java @@ -1561,8 +1561,8 @@ public void testEmbeddingTypeFromTaskTypeAndSettings() { public void testIsDefaultId() { var service = createService(mock(Client.class)); - assertTrue(service.isDefaultId(".elser-2")); - assertTrue(service.isDefaultId(".multi-e5-small")); + assertTrue(service.isDefaultId(".elser-2-elasticsearch")); + assertTrue(service.isDefaultId(".multilingual-e5-small-elasticsearch")); assertFalse(service.isDefaultId("foo")); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java new file mode 100644 index 0000000000000..96cd42efa42f5 --- /dev/null +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elasticsearch/ElserInternalModelTests.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.inference.services.elasticsearch; + +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; + +public class ElserInternalModelTests extends ESTestCase { + public void testUpdateNumAllocation() { + var model = new ElserInternalModel( + "foo", + TaskType.SPARSE_EMBEDDING, + ElasticsearchInternalService.NAME, + new ElserInternalServiceSettings(null, 1, "elser", null), + new ElserMlNodeTaskSettings(), + null + ); + + model.updateNumAllocations(1); + assertEquals(1, model.getServiceSettings().getNumAllocations().intValue()); + + model.updateNumAllocations(null); + assertNull(model.getServiceSettings().getNumAllocations()); + } +} diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java index beba9b1a92477..cf1438b334478 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/openai/OpenAiServiceTests.java @@ -1007,7 +1007,6 @@ public void testInfer_SendsRequest() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest() throws Exception { String responseJson = """ data: {\ @@ -1057,7 +1056,6 @@ private InferenceServiceResults streamChatCompletion() throws IOException { } } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/114385") public void testInfer_StreamRequest_ErrorResponse() throws Exception { String responseJson = """ { diff --git a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java similarity index 59% rename from x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java rename to x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java index edecf4eb9669e..f5ac107628d1a 100644 --- a/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbRestIT.java +++ b/x-pack/plugin/logsdb/qa/with-basic/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsdbWithBasicRestIT.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.logsdb; +import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.test.cluster.ElasticsearchCluster; @@ -19,7 +20,7 @@ import java.util.List; import java.util.Map; -public class LogsdbRestIT extends ESRestTestCase { +public class LogsdbWithBasicRestIT extends ESRestTestCase { @ClassRule public static ElasticsearchCluster cluster = ElasticsearchCluster.local() @@ -96,7 +97,7 @@ public void testLogsdbOverrideSyntheticSourceModeInMapping() throws IOException assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } - public void testLogsdbNoOverrideSyntheticSourceSetting() throws IOException { + public void testLogsdbOverrideSyntheticSourceSetting() throws IOException { final String index = "test-index"; createIndex( index, @@ -104,6 +105,70 @@ public void testLogsdbNoOverrideSyntheticSourceSetting() 
throws IOException { ); var settings = (Map<?, ?>) ((Map<?, ?>) getIndexSettings(index).get(index)).get("settings"); assertEquals("logsdb", settings.get("index.mode")); - assertEquals(SourceFieldMapper.Mode.SYNTHETIC.toString(), settings.get("index.mapping.source.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideNullSyntheticSourceSetting() throws IOException { + final String index = "test-index"; + createIndex(index, Settings.builder().put("index.mode", "logsdb").putNull("index.mapping.source.mode").build()); + var settings = (Map<?, ?>) ((Map<?, ?>) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideSyntheticSourceSettingInTemplate() throws IOException { + var request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["test-*"], + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "mapping": { + "source": { + "mode": "synthetic" + } + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + final String index = "test-index"; + createIndex(index); + var settings = (Map<?, ?>) ((Map<?, ?>) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); + } + + public void testLogsdbOverrideNullInTemplate() throws IOException { + var request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(""" + { + "index_patterns": ["test-*"], + "template": { + "settings":{ + "index": { + "mode": "logsdb", + "mapping": { + "source": { + "mode": null + } + } + } + } + } + } + """); + assertOK(client().performRequest(request)); + + final String index = "test-index"; + createIndex(index); + var settings = (Map<?, ?>) ((Map<?, ?>) getIndexSettings(index).get(index)).get("settings"); + assertEquals("logsdb", settings.get("index.mode")); + assertEquals(SourceFieldMapper.Mode.STORED.toString(), settings.get("index.mapping.source.mode")); } } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java new file mode 100644 index 0000000000000..adb23567e3933 --- /dev/null +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsDbSourceModeMigrationIT.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.logsdb; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; + +public class LogsDbSourceModeMigrationIT extends LogsIndexModeRestTestIT { + public static final String INDEX_TEMPLATE = """ + { + "index_patterns": ["my-logs-*-*"], + "priority": 100, + "data_stream": {}, + "composed_of": [ + "my-logs-mapping", + "my-logs-original-source", + "my-logs-migrated-source" + ], + "ignore_missing_component_templates": ["my-logs-original-source", "my-logs-migrated-source"] + } + """; + + public static final String MAPPING_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mode": "logsdb" + } + }, + "mappings": { + "properties": { + "@timestamp": { + "type": "date", + "format": "epoch_millis" + }, + "message": { + "type": "text" + }, + "method": { + "type": "keyword" + }, + "hits": { + "type": "long" + } + } + } + } + }"""; + + public static final String STORED_SOURCE_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "stored" + } + } + } + }"""; + + public static final String SYNTHETIC_SOURCE_COMPONENT_TEMPLATE = """ + { + "template": { + "settings": { + "index": { + "mapping.source.mode": "synthetic" + } + } + } + }"""; + + @ClassRule() + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .module("constant-keyword") + .module("data-streams") + .module("mapper-extras") + .module("x-pack-aggregate-metric") + .module("x-pack-stack") + .setting("xpack.security.enabled", "false") + .setting("xpack.otel_data.registry.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("cluster.logsdb.enabled", "true") + .setting("stack.templates.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Before + public void setup() { + client = client(); + } + + private RestClient client; + + public void testSwitchFromStoredToSyntheticSource() throws IOException { + assertOK(putComponentTemplate(client, "my-logs-mapping", MAPPING_COMPONENT_TEMPLATE)); + assertOK(putComponentTemplate(client, "my-logs-original-source", STORED_SOURCE_COMPONENT_TEMPLATE)); + + assertOK(putTemplate(client, "my-logs", INDEX_TEMPLATE)); + assertOK(createDataStream(client, "my-logs-ds-test")); + + var initialSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 0), + "index.mapping.source.mode" + ); + 
assertThat(initialSourceMode, equalTo("stored")); + var initialIndexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "my-logs-ds-test", 0), "index.mode"); + assertThat(initialIndexMode, equalTo("logsdb")); + + var indexedWithStoredSource = new ArrayList<XContentBuilder>(); + var indexedWithSyntheticSource = new ArrayList<XContentBuilder>(); + for (int i = 0; i < 10; i++) { + indexedWithStoredSource.add(generateDoc()); + indexedWithSyntheticSource.add(generateDoc()); + } + + Response storedSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithStoredSource, 0); + assertOK(storedSourceBulkResponse); + assertThat(entityAsMap(storedSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + assertOK(putComponentTemplate(client, "my-logs-migrated-source", SYNTHETIC_SOURCE_COMPONENT_TEMPLATE)); + var rolloverResponse = rolloverDataStream(client, "my-logs-ds-test"); + assertOK(rolloverResponse); + assertThat(entityAsMap(rolloverResponse).get("rolled_over"), is(true)); + + var finalSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 1), + "index.mapping.source.mode" + ); + assertThat(finalSourceMode, equalTo("synthetic")); + + Response syntheticSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithSyntheticSource, 10); + assertOK(syntheticSourceBulkResponse); + assertThat(entityAsMap(syntheticSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + var allDocs = Stream.concat(indexedWithStoredSource.stream(), indexedWithSyntheticSource.stream()).toList(); + + var sourceList = search(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(allDocs.size()), "my-logs-ds-test"); + assertThat(sourceList.size(), equalTo(allDocs.size())); + + for (int i = 0; i < sourceList.size(); i++) { + var expected = XContentHelper.convertToMap(BytesReference.bytes(allDocs.get(i)), false, XContentType.JSON).v2(); + assertThat(sourceList.get(i), equalTo(expected)); + } + } + + public void testSwitchFromSyntheticToStoredSource() throws IOException { + assertOK(putComponentTemplate(client, "my-logs-mapping", MAPPING_COMPONENT_TEMPLATE)); + assertOK(putComponentTemplate(client, "my-logs-original-source", SYNTHETIC_SOURCE_COMPONENT_TEMPLATE)); + + assertOK(putTemplate(client, "my-logs", INDEX_TEMPLATE)); + assertOK(createDataStream(client, "my-logs-ds-test")); + + var initialSourceMode = (String) getSetting( + client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 0), + "index.mapping.source.mode" + ); + assertThat(initialSourceMode, equalTo("synthetic")); + var initialIndexMode = (String) getSetting(client, getDataStreamBackingIndex(client, "my-logs-ds-test", 0), "index.mode"); + assertThat(initialIndexMode, equalTo("logsdb")); + + var indexedWithSyntheticSource = new ArrayList<XContentBuilder>(); + var indexedWithStoredSource = new ArrayList<XContentBuilder>(); + for (int i = 0; i < 10; i++) { + indexedWithSyntheticSource.add(generateDoc()); + indexedWithStoredSource.add(generateDoc()); + } + + Response syntheticSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithSyntheticSource, 0); + assertOK(syntheticSourceBulkResponse); + assertThat(entityAsMap(syntheticSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + assertOK(putComponentTemplate(client, "my-logs-migrated-source", STORED_SOURCE_COMPONENT_TEMPLATE)); + var rolloverResponse = rolloverDataStream(client, "my-logs-ds-test"); + assertOK(rolloverResponse); + assertThat(entityAsMap(rolloverResponse).get("rolled_over"), is(true)); + + var finalSourceMode = (String) getSetting( + 
client, + getDataStreamBackingIndex(client, "my-logs-ds-test", 1), + "index.mapping.source.mode" + ); + assertThat(finalSourceMode, equalTo("stored")); + + Response storedSourceBulkResponse = bulkIndex(client, "my-logs-ds-test", indexedWithStoredSource, 10); + assertOK(storedSourceBulkResponse); + assertThat(entityAsMap(storedSourceBulkResponse).get("errors"), Matchers.equalTo(false)); + + var allDocs = Stream.concat(indexedWithSyntheticSource.stream(), indexedWithStoredSource.stream()).toList(); + + var sourceList = search(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).size(allDocs.size()), "my-logs-ds-test"); + assertThat(sourceList.size(), equalTo(allDocs.size())); + + for (int i = 0; i < sourceList.size(); i++) { + var expected = XContentHelper.convertToMap(BytesReference.bytes(allDocs.get(i)), false, XContentType.JSON).v2(); + assertThat(sourceList.get(i), equalTo(expected)); + } + } + + private static Response bulkIndex(RestClient client, String dataStreamName, List<XContentBuilder> documents, int startId) + throws IOException { + var sb = new StringBuilder(); + int id = startId; + for (var document : documents) { + sb.append(Strings.format("{ \"create\": { \"_id\" : \"%d\" } }", id)).append("\n"); + sb.append(Strings.toString(document)).append("\n"); + id++; + } + + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + bulkRequest.setJsonEntity(sb.toString()); + bulkRequest.addParameter("refresh", "true"); + return client.performRequest(bulkRequest); + } + + @SuppressWarnings("unchecked") + private List<Map<String, Object>> search(SearchSourceBuilder search, String dataStreamName) throws IOException { + var request = new Request("GET", "/" + dataStreamName + "/_search"); + request.setJsonEntity(Strings.toString(search)); + var searchResponse = client.performRequest(request); + assertOK(searchResponse); + + Map<String, Object> searchResponseMap = XContentHelper.convertToMap( + XContentType.JSON.xContent(), + searchResponse.getEntity().getContent(), + false + ); + var hitsMap = (Map<String, Object>) searchResponseMap.get("hits"); + + var hitsList = (List<Map<String, Object>>) hitsMap.get("hits"); + assertThat(hitsList.size(), greaterThan(0)); + + return hitsList.stream() + .sorted(Comparator.comparingInt((Map<String, Object> hit) -> Integer.parseInt((String) hit.get("_id")))) + .map(hit -> (Map<String, Object>) hit.get("_source")) + .toList(); + } + + private static XContentBuilder generateDoc() throws IOException { + var doc = XContentFactory.jsonBuilder(); + doc.startObject(); + { + doc.field("@timestamp", Long.toString(randomMillisUpToYear9999())); + doc.field("message", randomAlphaOfLengthBetween(20, 50)); + doc.field("method", randomAlphaOfLength(3)); + doc.field("hits", randomLong()); + } + doc.endObject(); + + return doc; + } +} diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java index c5ccee1d36b72..f529b9fa1db96 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeCustomSettingsIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.logsdb; -import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.Settings; @@ -496,16 +495,6 @@ public void testIgnoreAboveSetting() throws IOException { } } - private static 
Map<String, Object> getMapping(final RestClient client, final String indexName) throws IOException { - final Request request = new Request("GET", "/" + indexName + "/_mapping"); - - Map<String, Object> mappings = ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get( - "mappings" - ); - - return mappings; - } - private Function<Object, Map<String, Object>> subObject(String key) { return (mapAsObject) -> (Map<String, Object>) ((Map<String, Object>) mapAsObject).get(key); } diff --git a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java index dbee5d1d2de8c..cc7f5bdb33871 100644 --- a/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java +++ b/x-pack/plugin/logsdb/src/javaRestTest/java/org/elasticsearch/xpack/logsdb/LogsIndexModeRestTestIT.java @@ -98,4 +98,15 @@ protected static Response putClusterSetting(final RestClient client, final Strin request.setJsonEntity("{ \"transient\": { \"" + settingName + "\": " + settingValue + " } }"); return client.performRequest(request); } + + @SuppressWarnings("unchecked") + protected static Map<String, Object> getMapping(final RestClient client, final String indexName) throws IOException { + final Request request = new Request("GET", "/" + indexName + "/_mapping"); + + Map<String, Object> mappings = ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get( + "mappings" + ); + + return mappings; + } } diff --git a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java index f60c941c75a7c..4625fe91294d7 100644 --- a/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java +++ b/x-pack/plugin/logsdb/src/main/java/org/elasticsearch/xpack/logsdb/SyntheticSourceIndexSettingsProvider.java @@ -47,6 +47,12 @@ final class SyntheticSourceIndexSettingsProvider implements IndexSettingProvider this.mapperServiceFactory = mapperServiceFactory; } + @Override + public boolean overrulesTemplateAndRequestSettings() { + // Indicates that the provider value takes precedence over any user setting.
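+ // For example, a synthetic source mode requested via a template or the create-index request can be replaced by this provider's value, which is the behaviour the LogsdbWithBasicRestIT changes above assert.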
+ return true; + } + @Override public Settings getAdditionalIndexSettings( String indexName, diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml new file mode 100644 index 0000000000000..21e4f49fe7af5 --- /dev/null +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/20_failure_store.yml @@ -0,0 +1,99 @@ +--- +teardown: + - do: + indices.delete_data_stream: + name: my-logs-fs + ignore: 404 + + - do: + indices.delete_index_template: + name: template + ignore: 404 + + - do: + indices.delete_data_stream: + name: my-logs-db + ignore: 404 + - do: + indices.delete_index_template: + name: template1 + ignore: 404 + +--- +Test failure store with logsdb: + - requires: + test_runner_features: [ capabilities, allowed_warnings ] + capabilities: + - method: PUT + path: /{index} + capabilities: [ logsdb_index_mode ] + - method: POST + path: /_bulk + capabilities: [ 'failure_store_status' ] + - method: PUT + path: /_bulk + capabilities: [ 'failure_store_status' ] + reason: "Support for 'logsdb' index mode & failure status capability required" + + - do: + allowed_warnings: + - "index template [my-template] has index patterns [my-logs-fs*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template] will take precedence during new index creation" + indices.put_index_template: + name: my-template + body: + index_patterns: ["my-logs-fs*"] + data_stream: + failure_store: true + template: + settings: + index: + mode: logsdb + number_of_replicas: 1 + number_of_shards: 2 + - do: + allowed_warnings: + - "index template [my-template2] has index patterns [my-logs-db*] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template2] will take precedence during new index creation" + indices.put_index_template: + name: my-template2 + body: + index_patterns: [ "my-logs-db*" ] + data_stream: {} + template: + settings: + index: + mode: logsdb + number_of_replicas: 1 + number_of_shards: 2 + + - do: + bulk: + refresh: true + body: + - '{ "create": { "_index": "my-logs-fs"} }' + - '{"@timestamp":"2019-08-06T12:09:12.375Z", "log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - '{ "create": { "_index": "my-logs-db"} }' + - '{ "@timestamp": "2022-01-01", "log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer" }' + - '{ "create": { "_index": "my-logs-fs"} }' + - '{"log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - '{ "create": { "_index": "my-logs-db"} }' + - '{"log.level": "INFO", "message":"Tomcat started on port(s): 8080 (http) with context path ''", "service.name":"spring-petclinic","process.thread.name":"restartedMain","log.logger":"org.springframework.boot.web.embedded.tomcat.TomcatWebServer"}' + - is_true: errors + + # Successfully indexed to backing index + - match: { items.0.create._index: 
'/\.ds-my-logs-fs-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.0.create.status: 201 } + - is_false: items.0.create.failure_store + - match: { items.1.create._index: '/\.ds-my-logs-db-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.1.create.status: 201 } + - is_false: items.1.create.failure_store + + # Successfully indexed to failure store + - match: { items.2.create._index: '/\.fs-my-logs-fs-(\d{4}\.\d{2}\.\d{2}-)?000002/' } + - match: { items.2.create.status: 201 } + - match: { items.2.create.failure_store: used } + + # Rejected, eligible to go to failure store, but failure store not enabled + - match: { items.3.create._index: '/\.ds-my-logs-db-(\d{4}\.\d{2}\.\d{2}-)?000001/' } + - match: { items.3.create.status: 400 } + - match: { items.3.create.error.type: document_parsing_exception } + - match: { items.3.create.failure_store: not_enabled } diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml index 68597afda6c78..bc81d1eb67309 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/50_esql_synthetic_source_disabled_fields.yml @@ -283,7 +283,7 @@ teardown: - match: {values.0.3: "PUT"} - match: {values.0.4: false} - match: {values.0.5: "POINT (-74.006 40.7128)"} - - match: {values.0.6: null} # null is expected, because text fields aren't stored in ignored source + - match: {values.0.6: "Do. Or do not. There is no try."} - match: {values.0.7: 102} - do: @@ -296,10 +296,86 @@ teardown: - match: {columns.0.name: "message"} - match: {columns.0.type: "text"} - # null is expected, because text fields aren't stored in ignored source - - match: {values.0.0: null} - - match: {values.1.0: null} - - match: {values.2.0: null} - - match: {values.3.0: null} - - match: {values.4.0: null} - - match: {values.5.0: null} + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + ignore_above: 3 + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." 
} + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." } + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + # Note that this isn't related to synthetic source. For both stored and synthetic source, null is returned: +# - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} diff --git a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml index 7e305bda4ef4e..6c840a0cf9d3a 100644 --- a/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml +++ b/x-pack/plugin/logsdb/src/yamlRestTest/resources/rest-api-spec/test/51_esql_synthetic_source.yml @@ -175,3 +175,80 @@ teardown: - match: {values.3.0: "No, I am your father."} - match: {values.4.0: "May the force be with you."} - match: {values.5.0: "That's no moon. It's a space station."} + +--- +"message field with stored keyword multi-field with ignore_above": + - do: + indices.create: + index: my-index2 + body: + settings: + index: + mode: logsdb + mappings: + properties: + "@timestamp": + type: date + host.name: + type: keyword + store: false + message: + type: text + store: false + fields: + raw: + type: keyword + store: true + + - do: + bulk: + index: my-index2 + refresh: true + body: + - { "index": { } } + - { "@timestamp": "2024-02-12T10:30:00Z", "host.name": "foo", "message": "No, I am your father." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:31:00Z", "host.name": "bar", "message": "Do. Or do not. There is no try." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:32:00Z", "host.name": "foo", "message": "May the force be with you." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:33:00Z", "host.name": "baz", "message": "I find your lack of faith disturbing." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:34:00Z", "host.name": "baz", "message": "Wars not make one great." } + - { "index": { } } + - { "@timestamp": "2024-02-12T10:35:00Z", "host.name": "foo", "message": "That's no moon. It's a space station." 
} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | LIMIT 1' + + - match: {columns.0.name: "@timestamp"} + - match: {columns.0.type: "date"} + - match: {columns.1.name: "host.name"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "message"} + - match: {columns.2.type: "text"} + - match: {columns.3.name: "message.raw"} + - match: {columns.3.type: "keyword"} + + - match: {values.0.0: "2024-02-12T10:31:00.000Z"} + - match: {values.0.1: "bar"} + - match: {values.0.2: "Do. Or do not. There is no try."} + - match: {values.0.3: "Do. Or do not. There is no try."} + + - do: + esql.query: + body: + query: 'FROM my-index2 | SORT host.name, @timestamp | KEEP message | LIMIT 10' + + - match: {columns.0.name: "message"} + - match: {columns.0.type: "text"} + + - match: {values.0.0: "Do. Or do not. There is no try."} + - match: {values.1.0: "I find your lack of faith disturbing."} + - match: {values.2.0: "Wars not make one great."} + - match: {values.3.0: "No, I am your father."} + - match: {values.4.0: "May the force be with you."} + - match: {values.5.0: "That's no moon. It's a space station."} + diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index e8fd0da496bbe..b43d87c17e644 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -339,8 +339,7 @@ protected Object parseSourceValue(Object value) { BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed() ? 
BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name()) : BlockSourceReader.lookupMatchingAll(); - var sourceMode = blContext.indexSettings().getIndexMappingSourceMode(); - return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup, sourceMode); + return new BlockSourceReader.LongsBlockLoader(valueFetcher, lookup); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 980cdc09252cb..9ebc510af4f4d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -220,7 +220,7 @@ static GetDeploymentStatsAction.Response addFailedRoutes( // add nodes from the failures that were not in the task responses for (var nodeRoutingState : nodeToRoutingStates.entrySet()) { - if (visitedNodes.contains(nodeRoutingState.getKey()) == false) { + if ((visitedNodes.contains(nodeRoutingState.getKey()) == false) && nodes.nodeExists(nodeRoutingState.getKey())) { updatedNodeStats.add( AssignmentStats.NodeStats.forNotStartedState( nodes.get(nodeRoutingState.getKey()), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java index bbd63e0d3bfe9..0dec99a9b9bb9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScaler.java @@ -33,6 +33,7 @@ public class AdaptiveAllocationsScaler { private final String deploymentId; private final KalmanFilter1d requestRateEstimator; private final KalmanFilter1d inferenceTimeEstimator; + private final long scaleToZeroAfterNoRequestsSeconds; private double timeWithoutRequestsSeconds; private int numberOfAllocations; @@ -44,10 +45,11 @@ public class AdaptiveAllocationsScaler { private Double lastMeasuredRequestRate; private Double lastMeasuredInferenceTime; private Long lastMeasuredQueueSize; - private long scaleToZeroAfterNoRequestsSeconds; AdaptiveAllocationsScaler(String deploymentId, int numberOfAllocations, long scaleToZeroAfterNoRequestsSeconds) { this.deploymentId = deploymentId; + this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; + // A smoothing factor of 100 roughly means the last 100 measurements have an effect // on the estimated values. The sampling time is 10 seconds, so approximately the // last 15 minutes are taken into account. 
@@ -67,7 +69,6 @@ public class AdaptiveAllocationsScaler { lastMeasuredRequestRate = null; lastMeasuredInferenceTime = null; lastMeasuredQueueSize = null; - this.scaleToZeroAfterNoRequestsSeconds = scaleToZeroAfterNoRequestsSeconds; } void setMinMaxNumberOfAllocations(Integer minNumberOfAllocations, Integer maxNumberOfAllocations) { @@ -117,6 +118,10 @@ void process(AdaptiveAllocationsScalerService.Stats stats, double timeIntervalSe dynamicsChanged = false; } + void resetTimeWithoutRequests() { + timeWithoutRequestsSeconds = 0; + } + double getLoadLower() { double requestRateLower = Math.max(0.0, requestRateEstimator.lower()); double inferenceTimeLower = Math.max(0.0, inferenceTimeEstimator.hasValue() ? inferenceTimeEstimator.lower() : 1.0); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java index 770e890512935..16ec3ee9b468c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerService.java @@ -188,7 +188,10 @@ Collection observeDouble(Function nodeStatsList = new ArrayList<>(); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node1"))); + nodeStatsList.add(AssignmentStatsTests.randomNodeStats(nodes.get("node2"))); + + var model1 = new AssignmentStats( + "model1", + "deployment1", + randomBoolean() ? null : randomIntBetween(1, 8), + randomBoolean() ? null : randomIntBetween(1, 8), + null, + randomBoolean() ? null : randomIntBetween(1, 10000), + randomBoolean() ? null : ByteSizeValue.ofBytes(randomLongBetween(1, 1000000)), + Instant.now(), + nodeStatsList, + randomFrom(Priority.values()) + ); + var response = new GetDeploymentStatsAction.Response(Collections.emptyList(), Collections.emptyList(), List.of(model1), 1); + + // failed state for node 3 conflicts + Map<TrainedModelAssignment, Map<String, RoutingInfo>> badRoutes = new HashMap<>(); + Map<String, RoutingInfo> nodeRoutes = new HashMap<>(); + nodeRoutes.put("node3", new RoutingInfo(1, 1, RoutingState.FAILED, "failed on node3")); + badRoutes.put(createAssignment("model1"), nodeRoutes); + + var modified = TransportGetDeploymentStatsAction.addFailedRoutes(response, badRoutes, nodes); + List<AssignmentStats> results = modified.getStats().results(); + assertThat(results, hasSize(1)); + assertThat(results.get(0).getNodeStats(), hasSize(2)); // node3 is dropped because it is not present in the discovery nodes + assertEquals("node1", results.get(0).getNodeStats().get(0).getNode().getId()); + assertEquals("node2", results.get(0).getNodeStats().get(1).getNode().getId()); + } + private DiscoveryNodes buildNodes(String... 
nodeIds) throws UnknownHostException { InetAddress inetAddress = InetAddress.getByAddress(new byte[] { (byte) 192, (byte) 168, (byte) 0, (byte) 1 }); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java index 0fb8ad314343a..a5276e7371128 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerTests.java @@ -195,6 +195,35 @@ public void testAutoscaling_scaleDownToZeroAllocations() { } } + public void testAutoscaling_resetTimeWithoutRequests() { + int scaleDownAfterInactivitySeconds = 60 * 15; // scale down to 0 after 15 minutes + AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler( + "test-deployment", + 0, + scaleDownAfterInactivitySeconds + ); + + // 1 hour without requests, but call "reset" every 10 minutes, so don't scale. + for (int i = 0; i < 360; i++) { + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.05), 10, 0); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + if (i % 60 == 0) { + adaptiveAllocationsScaler.resetTimeWithoutRequests(); + } + } + + adaptiveAllocationsScaler.resetTimeWithoutRequests(); + // 15 minutes with no requests, so don't scale. + for (int i = 0; i < 90; i++) { + adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.05), 10, 1); + assertThat(adaptiveAllocationsScaler.scale(), nullValue()); + } + + // another second with no requests, so scale to zero allocations. 
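+ // (90 iterations x 10 seconds = 900 seconds, exactly the 15-minute threshold; the 1-second step below pushes past it.)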
+ adaptiveAllocationsScaler.process(new AdaptiveAllocationsScalerService.Stats(0, 0, 0, 0.05), 1, 1); + assertThat(adaptiveAllocationsScaler.scale(), equalTo(0)); + } + public void testAutoscaling_dontScaleDownToZeroAllocationsWhenMinAllocationsIsSet() { AdaptiveAllocationsScaler adaptiveAllocationsScaler = new AdaptiveAllocationsScaler("test-deployment", 1, 60); adaptiveAllocationsScaler.setMinMaxNumberOfAllocations(1, null); diff --git a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java index c5978219d94d3..37e1807d138aa 100644 --- a/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java +++ b/x-pack/plugin/rank-rrf/src/internalClusterTest/java/org/elasticsearch/xpack/rank/rrf/RRFRetrieverBuilderIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.rank.rrf; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequestBuilder; @@ -18,6 +20,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -47,7 +50,6 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -589,11 +591,11 @@ public void testRRFExplainWithAnotherNestedRRF() { }); } - public void testRRFInnerRetrieverSearchError() { + public void testRRFInnerRetrieverAll4xxSearchErrors() { final int rankWindowSize = 100; final int rankConstant = 10; SearchSourceBuilder source = new SearchSourceBuilder(); - // this will throw an error during evaluation + // this will throw a 4xx error during evaluation StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) ); @@ -615,10 +617,57 @@ public void testRRFInnerRetrieverSearchError() { ) ); SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); - Exception ex = expectThrows(IllegalStateException.class, req::get); - assertThat(ex, instanceOf(IllegalStateException.class)); - assertThat(ex.getMessage(), containsString("Search failed - some nested retrievers returned errors")); - assertThat(ex.getSuppressed().length, greaterThan(0)); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard]' returned errors. All failures are attached as suppressed exceptions." 
+ ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.BAD_REQUEST)); + assertThat(ex.getSuppressed().length, equalTo(1)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + } + + public void testRRFInnerRetrieverMultipleErrorsOne5xx() { + final int rankWindowSize = 100; + final int rankConstant = 10; + SearchSourceBuilder source = new SearchSourceBuilder(); + // this will throw a 4xx error during evaluation + StandardRetrieverBuilder standard0 = new StandardRetrieverBuilder( + QueryBuilders.constantScoreQuery(QueryBuilders.rangeQuery(VECTOR_FIELD).gte(10)) + ); + // this will throw a 5xx error + TestRetrieverBuilder testRetrieverBuilder = new TestRetrieverBuilder("val") { + @Override + public void extractToSearchSourceBuilder(SearchSourceBuilder searchSourceBuilder, boolean compoundUsed) { + searchSourceBuilder.aggregation(AggregationBuilders.avg("some_invalid_param")); + } + }; + source.retriever( + new RRFRetrieverBuilder( + Arrays.asList( + new CompoundRetrieverBuilder.RetrieverSource(standard0, null), + new CompoundRetrieverBuilder.RetrieverSource(testRetrieverBuilder, null) + ), + rankWindowSize, + rankConstant + ) + ); + SearchRequestBuilder req = client().prepareSearch(INDEX).setSource(source); + Exception ex = expectThrows(ElasticsearchStatusException.class, req::get); + assertThat(ex, instanceOf(ElasticsearchStatusException.class)); + assertThat( + ex.getMessage(), + containsString( + "[rrf] search failed - retrievers '[standard, test]' returned errors. All failures are attached as suppressed exceptions." + ) + ); + assertThat(ExceptionsHelper.status(ex), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); + assertThat(ex.getSuppressed().length, equalTo(2)); + assertThat(ex.getSuppressed()[0].getCause().getCause(), instanceOf(IllegalArgumentException.class)); + assertThat(ex.getSuppressed()[1].getCause().getCause(), instanceOf(IllegalStateException.class)); } public void testRRFInnerRetrieverErrorWhenExtractingToSource() { diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java index 1abb9bbb067dc..523f04fb436f4 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java @@ -19,6 +19,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.TestSecurityClient; import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; @@ -41,9 +42,7 @@ public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase { private TestSecurityClient securityClient; - @ClassRule - public static ElasticsearchCluster cluster = ElasticsearchCluster.local() - .nodes(2) + public static LocalClusterConfigProvider commonTrialSecurityClusterConfig = cluster -> cluster.nodes(2) .distribution(DistributionType.DEFAULT) .setting("xpack.ml.enabled", "false") .setting("xpack.license.self_generated.type", "trial") @@ 
-62,8 +61,10 @@ public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase .user("admin_user", "admin-password", ROOT_USER_ROLE, true) .user("security_test_user", "security-test-password", "security_test_role", false) .user("x_pack_rest_user", "x-pack-test-password", ROOT_USER_ROLE, true) - .user("cat_test_user", "cat-test-password", "cat_test_role", false) - .build(); + .user("cat_test_user", "cat-test-password", "cat_test_role", false); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local().apply(commonTrialSecurityClusterConfig).build(); @Override protected String getTestRestCluster() { diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java new file mode 100644 index 0000000000000..51970af4b88a0 --- /dev/null +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/rolemapping/RoleMappingRestIT.java @@ -0,0 +1,268 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.rolemapping; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; +import org.junit.ClassRule; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; + +public class RoleMappingRestIT extends ESRestTestCase { + private static final String settingsJson = """ + { + "metadata": { + "version": "1", + "compatibility": "8.4.0" + }, + "state": { + "role_mappings": { + "role-mapping-1": { + "enabled": true, + "roles": [ "role_1" ], + "rules": { "field": { "username": "no_user" } }, + "metadata": { + "uuid" : "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo": "something", + "_es_reserved_role_mapping_name": "ignored" + } + }, + "role-mapping-2": { + "enabled": true, + "roles": [ "role_2" ], + "rules": { "field": { 
"username": "no_user" } } + }, + "role-mapping-3": { + "enabled": true, + "roles": [ "role_3" ], + "rules": { "field": { "username": "no_user" } }, + "metadata": { + "_read_only" : { "field": 1 }, + "_es_reserved_role_mapping_name": { "still_ignored": true } + } + } + } + } + }"""; + private static final ExpressionRoleMapping clusterStateMapping1 = new ExpressionRoleMapping( + "role-mapping-1-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_1"), + null, + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", "_read_only", true), + true + ); + private static final ExpressionRoleMapping clusterStateMapping2 = new ExpressionRoleMapping( + "role-mapping-2-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_2"), + null, + Map.of("_read_only", true), + true + ); + private static final ExpressionRoleMapping clusterStateMapping3 = new ExpressionRoleMapping( + "role-mapping-3-read-only-operator-mapping", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("no_user"))), + List.of("role_3"), + null, + Map.of("_read_only", true), + true + ); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .apply(SecurityOnTrialLicenseRestTestCase.commonTrialSecurityClusterConfig) + .configFile("operator/settings.json", Resource.fromString(settingsJson)) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testGetRoleMappings() throws IOException { + expectMappings(List.of(clusterStateMapping1, clusterStateMapping2, clusterStateMapping3)); + expectMappings(List.of(clusterStateMapping1), "role-mapping-1"); + expectMappings(List.of(clusterStateMapping1, clusterStateMapping3), "role-mapping-1", "role-mapping-3"); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName()); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName(), "role-mapping-1"); + + expect404(() -> getMappings("role-mapping-4")); + expect404(() -> getMappings("role-mapping-4-read-only-operator-mapping")); + + ExpressionRoleMapping nativeMapping1 = expressionRoleMapping("role-mapping-1"); + putMapping(nativeMapping1, createOrUpdateWarning(nativeMapping1.getName())); + + ExpressionRoleMapping nativeMapping4 = expressionRoleMapping("role-mapping-4"); + putMapping(nativeMapping4); + + expectMappings(List.of(clusterStateMapping1, clusterStateMapping2, clusterStateMapping3, nativeMapping1, nativeMapping4)); + expectMappings(List.of(clusterStateMapping1, nativeMapping1), "role-mapping-1"); + expectMappings(List.of(clusterStateMapping1, nativeMapping1), "role-mapping-1", clusterStateMapping1.getName()); + expectMappings(List.of(clusterStateMapping1), clusterStateMapping1.getName()); + expectMappings(List.of(nativeMapping4), "role-mapping-4"); + expectMappings(List.of(nativeMapping4), "role-mapping-4", "role-mapping-4-read-only-operator-mapping"); + } + + public void testPutAndDeleteRoleMappings() throws IOException { + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-1-read-only-operator-mapping")) + ); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [role-mapping-1-read-only-operator-mapping]. 
" + + "[-read-only-operator-mapping] is not an allowed suffix" + ) + ); + } + + // Also fails even if a CS role mapping with that name does not exist + { + var ex = expectThrows( + ResponseException.class, + () -> putMapping(expressionRoleMapping("role-mapping-4-read-only-operator-mapping")) + ); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [role-mapping-4-read-only-operator-mapping]. " + + "[-read-only-operator-mapping] is not an allowed suffix" + ) + ); + } + + assertOK(putMapping(expressionRoleMapping("role-mapping-1"), createOrUpdateWarning("role-mapping-1"))); + + assertOK(deleteMapping("role-mapping-1", deletionWarning("role-mapping-1"))); + + // 404 without warnings if no native mapping exists + expect404(() -> deleteMapping("role-mapping-1")); + } + + private static void expect404(ThrowingRunnable clientCall) { + var ex = expectThrows(ResponseException.class, clientCall); + assertThat(ex.getResponse().getStatusLine().getStatusCode(), equalTo(404)); + } + + private static Response putMapping(ExpressionRoleMapping roleMapping) throws IOException { + return putMapping(roleMapping, null); + } + + private static Response putMapping(ExpressionRoleMapping roleMapping, @Nullable String warning) throws IOException { + Request request = new Request("PUT", "/_security/role_mapping/" + roleMapping.getName()); + XContentBuilder xContent = roleMapping.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS); + request.setJsonEntity(BytesReference.bytes(xContent).utf8ToString()); + if (warning != null) { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(List.of(warning)) == false).build() + ); + } + return client().performRequest(request); + } + + private static Response deleteMapping(String name) throws IOException { + return deleteMapping(name, null); + } + + private static Response deleteMapping(String name, @Nullable String warning) throws IOException { + Request request = new Request("DELETE", "/_security/role_mapping/" + name); + if (warning != null) { + request.setOptions( + RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(List.of(warning)) == false).build() + ); + } + return client().performRequest(request); + } + + private static ExpressionRoleMapping expressionRoleMapping(String name) { + return new ExpressionRoleMapping( + name, + new FieldExpression("username", List.of(new FieldExpression.FieldValue(randomAlphaOfLength(10)))), + List.of(randomAlphaOfLength(5)), + null, + Map.of(), + true + ); + } + + @SuppressWarnings("unchecked") + private static void expectMappings(List expectedMappings, String... requestedMappingNames) throws IOException { + Map map = responseAsMap(getMappings(requestedMappingNames)); + assertThat( + map.keySet(), + containsInAnyOrder(expectedMappings.stream().map(ExpressionRoleMapping::getName).toList().toArray(new String[0])) + ); + List actualMappings = new ArrayList<>(); + for (Map.Entry entry : map.entrySet()) { + XContentParser body = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, (Map) entry.getValue()); + ExpressionRoleMapping actual = ExpressionRoleMapping.parse(entry.getKey(), body); + actualMappings.add(actual); + } + assertThat(actualMappings, containsInAnyOrder(expectedMappings.toArray(new ExpressionRoleMapping[0]))); + } + + private static Response getMappings(String... 
requestedMappingNames) throws IOException { + return client().performRequest(new Request("GET", "/_security/role_mapping/" + String.join(",", requestedMappingNames))); + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + private static String createOrUpdateWarning(String mappingName) { + return "A read-only role mapping with the same name [" + + mappingName + + "] has been previously defined in a configuration file. " + + "Both role mappings will be used to determine role assignments."; + } + + private static String deletionWarning(String mappingName) { + return "A read-only role mapping with the same name [" + + mappingName + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments."; + }; +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java index 3b6ffd0698623..fdd854e7a9673 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/RoleMappingFileSettingsIT.java @@ -34,6 +34,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder; +import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; @@ -45,6 +46,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -63,7 +65,6 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.notNullValue; @@ -271,21 +272,28 @@ private void assertRoleMappingsSaveOK(CountDownLatch savedClusterState, AtomicLo assertThat(resolveRolesFuture.get(), containsInAnyOrder("kibana_user", "fleet_user")); } - // the role mappings are not retrievable by the role mapping action (which only accesses "native" i.e. 
index-based role mappings) - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); - assertThat(response.mappings(), emptyArray()); - - // role mappings (with the same names) can also be stored in the "native" store - var putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")).actionGet(); - assertTrue(putRoleMappingResponse.isCreated()); - putRoleMappingResponse = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet(); - assertTrue(putRoleMappingResponse.isCreated()); + // the role mappings are retrievable by the role mapping action for BWC + assertGetResponseHasMappings(true, "everyone_kibana", "everyone_fleet"); + + // role mappings (with the same names) can be stored in the "native" store + { + PutRoleMappingResponse response = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_kibana")) + .actionGet(); + assertTrue(response.isCreated()); + response = client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest("everyone_fleet")).actionGet(); + assertTrue(response.isCreated()); + } + { + // deleting role mappings that exist in the native store and in cluster-state should result in success + var response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).actionGet(); + assertTrue(response.isFound()); + response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_fleet")).actionGet(); + assertTrue(response.isFound()); + } + } - public void testRoleMappingsApplied() throws Exception { + public void testClusterStateRoleMappingsAddedThenDeleted() throws Exception { ensureGreen(); var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); @@ -294,6 +302,12 @@ public void testRoleMappingsApplied() throws Exception { assertRoleMappingsSaveOK(savedClusterState.v1(), savedClusterState.v2()); logger.info("---> cleanup cluster settings..."); + { + // Deleting non-existent native role mappings returns not found even if they exist in config file + var response = client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).get(); + assertFalse(response.isFound()); + } + savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); @@ -308,48 +322,96 @@ public void testRoleMappingsApplied() throws Exception { clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) ); - // native role mappings are not affected by the removal of the cluster-state based ones + // cluster-state role mapping was removed and is not returned in the API anymore { var request = new GetRoleMappingsRequest(); request.setNames("everyone_kibana", "everyone_fleet"); var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertTrue(response.hasMappings()); - assertThat( - Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), - containsInAnyOrder("everyone_kibana", "everyone_fleet") - ); + assertFalse(response.hasMappings()); } - // and roles are resolved based on the native role mappings + // no role mappings means no roles are resolved for (UserRoleMapper userRoleMapper : 
internalCluster().getInstances(UserRoleMapper.class)) { PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); userRoleMapper.resolveRoles( new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), resolveRolesFuture ); - assertThat(resolveRolesFuture.get(), contains("kibana_user_native")); + assertThat(resolveRolesFuture.get(), empty()); } + } - { - var request = new DeleteRoleMappingRequest(); - request.setName("everyone_kibana"); - var response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); - assertTrue(response.isFound()); - request = new DeleteRoleMappingRequest(); - request.setName("everyone_fleet"); - response = client().execute(DeleteRoleMappingAction.INSTANCE, request).get(); - assertTrue(response.isFound()); + public void testGetRoleMappings() throws Exception { + ensureGreen(); + + final List nativeMappings = List.of("everyone_kibana", "_everyone_kibana", "zzz_mapping", "123_mapping"); + for (var mapping : nativeMappings) { + client().execute(PutRoleMappingAction.INSTANCE, sampleRestRequest(mapping)).actionGet(); } - // no roles are resolved now, because both native and cluster-state based stores have been cleared - for (UserRoleMapper userRoleMapper : internalCluster().getInstances(UserRoleMapper.class)) { - PlainActionFuture> resolveRolesFuture = new PlainActionFuture<>(); - userRoleMapper.resolveRoles( - new UserRoleMapper.UserData("anyUsername", null, List.of(), Map.of(), mock(RealmConfig.class)), - resolveRolesFuture - ); - assertThat(resolveRolesFuture.get(), empty()); + var savedClusterState = setupClusterStateListener(internalCluster().getMasterName(), "everyone_kibana"); + writeJSONFile(internalCluster().getMasterName(), testJSON, logger, versionCounter); + boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + var request = new GetRoleMappingsRequest(); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + "everyone_kibana", + ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana"), + "_everyone_kibana", + ExpressionRoleMapping.addReadOnlySuffix("everyone_fleet"), + "zzz_mapping", + "123_mapping" + ) + ); + + List readOnlyFlags = new ArrayList<>(); + for (ExpressionRoleMapping mapping : response.mappings()) { + boolean isReadOnly = ExpressionRoleMapping.hasReadOnlySuffix(mapping.getName()) + && mapping.getMetadata().get("_read_only") != null; + readOnlyFlags.add(isReadOnly); } + // assert that cluster-state role mappings come last + assertThat(readOnlyFlags, contains(false, false, false, false, true, true)); + + // it's possible to delete overlapping native role mapping + assertTrue(client().execute(DeleteRoleMappingAction.INSTANCE, deleteRequest("everyone_kibana")).actionGet().isFound()); + + // Fetch a specific file based role + request = new GetRoleMappingsRequest(); + request.setNames(ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana")); + response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder(ExpressionRoleMapping.addReadOnlySuffix("everyone_kibana")) + ); + + savedClusterState = setupClusterStateListenerForCleanup(internalCluster().getMasterName()); + 
writeJSONFile(internalCluster().getMasterName(), emptyJSON, logger, versionCounter); + awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); + assertTrue(awaitSuccessful); + + final ClusterStateResponse clusterStateResponse = clusterAdmin().state( + new ClusterStateRequest(TEST_REQUEST_TIMEOUT).waitForMetadataVersion(savedClusterState.v2().get()) + ).get(); + + assertNull( + clusterStateResponse.getState().metadata().persistentSettings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()) + ); + + // Make sure remaining native mappings can still be fetched + request = new GetRoleMappingsRequest(); + response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder("_everyone_kibana", "zzz_mapping", "123_mapping") + ); } public static Tuple setupClusterStateListenerForError( @@ -434,11 +496,8 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { boolean awaitSuccessful = savedClusterState.v1().await(20, TimeUnit.SECONDS); assertTrue(awaitSuccessful); - // no native role mappings exist - var request = new GetRoleMappingsRequest(); - request.setNames("everyone_kibana", "everyone_fleet"); - var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); - assertFalse(response.hasMappings()); + // even if index is closed, cluster-state role mappings are still returned + assertGetResponseHasMappings(true, "everyone_kibana", "everyone_fleet"); // cluster state settings are also applied var clusterStateResponse = clusterAdmin().state( @@ -477,6 +536,12 @@ public void testRoleMappingApplyWithSecurityIndexClosed() throws Exception { } } + private DeleteRoleMappingRequest deleteRequest(String name) { + var request = new DeleteRoleMappingRequest(); + request.setName(name); + return request; + } + private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { var json = """ { @@ -495,4 +560,19 @@ private PutRoleMappingRequest sampleRestRequest(String name) throws Exception { return new PutRoleMappingRequestBuilder(null).source(name, parser).request(); } } + + private static void assertGetResponseHasMappings(boolean readOnly, String... mappings) throws InterruptedException, ExecutionException { + var request = new GetRoleMappingsRequest(); + request.setNames(mappings); + var response = client().execute(GetRoleMappingsAction.INSTANCE, request).get(); + assertTrue(response.hasMappings()); + assertThat( + Arrays.stream(response.mappings()).map(ExpressionRoleMapping::getName).toList(), + containsInAnyOrder( + Arrays.stream(mappings) + .map(mapping -> readOnly ? 
ExpressionRoleMapping.addReadOnlySuffix(mapping) : mapping) + .toArray(String[]::new) + ) + ); + } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java index 6c6582138ce89..97a5f080cee4e 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsRestartIT.java @@ -30,6 +30,7 @@ import static org.elasticsearch.integration.RoleMappingFileSettingsIT.setupClusterStateListenerForCleanup; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFile; import static org.elasticsearch.integration.RoleMappingFileSettingsIT.writeJSONFileWithoutVersionIncrement; +import static org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata.METADATA_NAME_FIELD; import static org.hamcrest.Matchers.containsInAnyOrder; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) @@ -123,7 +124,7 @@ public void testReservedStatePersistsOnRestart() throws Exception { new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -131,7 +132,14 @@ public void testReservedStatePersistsOnRestart() throws Exception { new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -141,26 +149,29 @@ public void testReservedStatePersistsOnRestart() throws Exception { ensureGreen(); awaitFileSettingsWatcher(); - // assert busy to give mappings time to update after restart; otherwise, the role mapping names might be dummy values - // `name_not_available_after_deserialization` - assertBusy( - () -> assertRoleMappingsInClusterState( - new ExpressionRoleMapping( - "everyone_kibana_alone", - new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), - List.of("kibana_user"), - List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), - true + assertRoleMappingsInClusterState( + new ExpressionRoleMapping( + "everyone_kibana_alone", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), + List.of("kibana_user"), + List.of(), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), + true + ), + new ExpressionRoleMapping( + "everyone_fleet_alone", + new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), + List.of("fleet_user"), + List.of(), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" ), - new ExpressionRoleMapping( - "everyone_fleet_alone", - new FieldExpression("username", List.of(new 
FieldExpression.FieldValue("*"))), - List.of("fleet_user"), - List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), - false - ) + false ) ); @@ -197,7 +208,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -205,7 +216,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -225,7 +243,7 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something", METADATA_NAME_FIELD, "everyone_kibana_alone"), true ), new ExpressionRoleMapping( @@ -233,7 +251,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("fleet_user"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", "_foo", "something_else"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be3-bb8d-02bb270cb3a7", + "_foo", + "something_else", + METADATA_NAME_FIELD, + "everyone_fleet_alone" + ), false ) ); @@ -251,7 +276,14 @@ public void testFileSettingsReprocessedOnRestartWithoutVersionChange() throws Ex new FieldExpression("username", List.of(new FieldExpression.FieldValue("*"))), List.of("kibana_user", "kibana_admin"), List.of(), - Map.of("uuid", "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", "_foo", "something"), + Map.of( + "uuid", + "b9a59ba9-6b92-4be2-bb8d-02bb270cb3a7", + "_foo", + "something", + METADATA_NAME_FIELD, + "everyone_kibana_together" + ), true ) ) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 79a00fa1293bd..8f32bcf7ace8a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -899,6 +899,7 @@ Collection createComponents( components.add(nativeUsersStore); components.add(new PluginComponentBinding<>(NativeRoleMappingStore.class, nativeRoleMappingStore)); components.add(new PluginComponentBinding<>(UserRoleMapper.class, userRoleMapper)); + components.add(clusterStateRoleMapper); components.add(reservedRealm); components.add(realms); this.realms.set(realms); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java index 73d1a1abcdb50..837b475dea68f 100644 
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/ReservedRoleMappingAction.java @@ -43,7 +43,7 @@ public String name() { @Override public TransformState transform(Object source, TransformState prevState) throws Exception { @SuppressWarnings("unchecked") - Set roleMappings = validate((List) source); + Set roleMappings = validateAndTranslate((List) source); RoleMappingMetadata newRoleMappingMetadata = new RoleMappingMetadata(roleMappings); if (newRoleMappingMetadata.equals(RoleMappingMetadata.getFromClusterState(prevState.state()))) { return prevState; @@ -71,7 +71,7 @@ public List fromXContent(XContentParser parser) throws IO return result; } - private Set validate(List roleMappings) { + private Set validateAndTranslate(List roleMappings) { var exceptions = new ArrayList(); for (var roleMapping : roleMappings) { // File based defined role mappings are allowed to use MetadataUtils.RESERVED_PREFIX @@ -85,6 +85,8 @@ private Set validate(List roleMapp exceptions.forEach(illegalArgumentException::addSuppressed); throw illegalArgumentException; } - return roleMappings.stream().map(PutRoleMappingRequest::getMapping).collect(Collectors.toUnmodifiableSet()); + return roleMappings.stream() + .map(r -> RoleMappingMetadata.copyWithNameInMetadata(r.getMapping())) + .collect(Collectors.toUnmodifiableSet()); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java index 74129facae70a..b1fdf2e90dd46 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportDeleteRoleMappingAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -16,17 +17,19 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingResponse; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; public class TransportDeleteRoleMappingAction extends HandledTransportAction { - private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportDeleteRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore roleMappingStore + NativeRoleMappingStore roleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super( DeleteRoleMappingAction.NAME, @@ -36,10 +39,24 @@ public TransportDeleteRoleMappingAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.roleMappingStore = roleMappingStore; + 
this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, DeleteRoleMappingRequest request, ActionListener listener) { - roleMappingStore.deleteRoleMapping(request, listener.safeMap(DeleteRoleMappingResponse::new)); + roleMappingStore.deleteRoleMapping(request, listener.safeMap(found -> { + if (found && clusterStateRoleMapper.hasMapping(request.getName())) { + // Allow to delete a mapping with the same name in the native role mapping store as the file_settings namespace, but + // add a warning header to signal to the caller that this could be a problem. + HeaderWarning.addWarning( + "A read-only role mapping with the same name [" + + request.getName() + + "] has previously been defined in a configuration file. " + + "The native role mapping was deleted, but the read-only mapping will remain active " + + "and will be used to determine role assignments." + ); + } + return new DeleteRoleMappingResponse(found); + })); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java index ac0d3177cca09..5f16b095db0ef 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsAction.java @@ -6,6 +6,8 @@ */ package org.elasticsearch.xpack.security.action.rolemapping; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; @@ -17,21 +19,31 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authz.RoleMappingMetadata; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; public class TransportGetRoleMappingsAction extends HandledTransportAction { + private static final Logger logger = LogManager.getLogger(TransportGetRoleMappingsAction.class); private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportGetRoleMappingsAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore nativeRoleMappingStore + NativeRoleMappingStore nativeRoleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super( GetRoleMappingsAction.NAME, @@ -41,19 +53,84 @@ public TransportGetRoleMappingsAction( EsExecutors.DIRECT_EXECUTOR_SERVICE ); this.roleMappingStore = nativeRoleMappingStore; + this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, final GetRoleMappingsRequest request, final ActionListener listener) { final Set 
names; if (request.getNames() == null || request.getNames().length == 0) { - names = null; + names = Set.of(); } else { names = new HashSet<>(Arrays.asList(request.getNames())); } - this.roleMappingStore.getRoleMappings(names, ActionListener.wrap(mappings -> { - ExpressionRoleMapping[] array = mappings.toArray(new ExpressionRoleMapping[mappings.size()]); - listener.onResponse(new GetRoleMappingsResponse(array)); + roleMappingStore.getRoleMappings(names, ActionListener.wrap(nativeRoleMappings -> { + final Collection clusterStateRoleMappings = clusterStateRoleMapper.getMappings( + // if the API was queried with a reserved suffix for any of the names, we need to remove it because role mappings are + // stored without it in cluster-state + removeReadOnlySuffixIfPresent(names) + ); + listener.onResponse(buildResponse(clusterStateRoleMappings, nativeRoleMappings)); }, listener::onFailure)); } + + private GetRoleMappingsResponse buildResponse( + Collection clusterStateMappings, + Collection nativeMappings + ) { + Stream translatedClusterStateMappings = clusterStateMappings.stream().filter(roleMapping -> { + if (RoleMappingMetadata.hasFallbackName(roleMapping)) { + logger.warn( + "Role mapping retrieved from cluster-state with an ambiguous name. It will be omitted from the API response." + + "This is likely a transient issue during node start-up." + ); + return false; + } + return true; + }).map(this::translateClusterStateMapping); + return new GetRoleMappingsResponse( + Stream.concat(nativeMappings.stream(), translatedClusterStateMappings).toArray(ExpressionRoleMapping[]::new) + ); + } + + private Set removeReadOnlySuffixIfPresent(Set names) { + return names.stream().map(ExpressionRoleMapping::removeReadOnlySuffixIfPresent).collect(Collectors.toSet()); + } + + /** + * Translator method for ensuring unique API names and marking cluster-state role mappings as read-only. + * Role mappings retrieved from cluster-state are surfaced through both the transport and REST layers, + * along with native role mappings. Unlike native role mappings, cluster-state role mappings are + * read-only and cannot be modified via APIs. It is possible for cluster-state and native role mappings + * to have overlapping names. + * + *
+ * <p>
+ * This does the following:
+ * <ol>
+ * <li>Appends a reserved suffix to cluster-state role mapping names to avoid conflicts with native role mappings.</li>
+ * <li>Marks the metadata of cluster-state role mappings with a reserved read-only flag.</li>
+ * <li>Removes internal metadata flag used in processing (see {@link RoleMappingMetadata#METADATA_NAME_FIELD}).</li>
+ * </ol>
+ */ + private ExpressionRoleMapping translateClusterStateMapping(ExpressionRoleMapping mapping) { + Map metadata = new HashMap<>(mapping.getMetadata()); + if (metadata.put(ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true) != null) { + logger.error( + "Metadata field [{}] is reserved and will be overwritten with an internal system value. " + + "Rename this field in your role mapping configuration.", + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG + ); + } + metadata.remove(RoleMappingMetadata.METADATA_NAME_FIELD); + return new ExpressionRoleMapping( + ExpressionRoleMapping.addReadOnlySuffix(mapping.getName()), + mapping.getExpression(), + mapping.getRoles(), + mapping.getRoleTemplates(), + metadata, + mapping.isEnabled() + ); + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java index 82a3b4f000064..682ade925d2ec 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingAction.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.tasks.Task; @@ -16,24 +17,41 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.validateNoReadOnlySuffix; + public class TransportPutRoleMappingAction extends HandledTransportAction { private final NativeRoleMappingStore roleMappingStore; + private final ClusterStateRoleMapper clusterStateRoleMapper; @Inject public TransportPutRoleMappingAction( ActionFilters actionFilters, TransportService transportService, - NativeRoleMappingStore roleMappingStore + NativeRoleMappingStore roleMappingStore, + ClusterStateRoleMapper clusterStateRoleMapper ) { super(PutRoleMappingAction.NAME, transportService, actionFilters, PutRoleMappingRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.roleMappingStore = roleMappingStore; + this.clusterStateRoleMapper = clusterStateRoleMapper; } @Override protected void doExecute(Task task, final PutRoleMappingRequest request, final ActionListener listener) { + validateNoReadOnlySuffix(request.getName()); + if (clusterStateRoleMapper.hasMapping(request.getName())) { + // Allow to define a mapping with the same name in the native role mapping store as the file_settings namespace, but add a + // warning header to signal to the caller that this could be a problem. + HeaderWarning.addWarning( + "A read-only role mapping with the same name [" + + request.getName() + + "] has been previously defined in a configuration file. 
" + + "Both role mappings will be used to determine role assignments." + ); + } roleMappingStore.putRoleMapping( request, ActionListener.wrap(created -> listener.onResponse(new PutRoleMappingResponse(created)), listener::onFailure) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java index 5dea6a938263c..99e3311283920 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/ClusterStateRoleMapper.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; @@ -21,6 +22,7 @@ import java.util.Objects; import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.security.SecurityExtension.SecurityComponents; @@ -28,8 +30,7 @@ * A role mapper the reads the role mapping rules (i.e. {@link ExpressionRoleMapping}s) from the cluster state * (i.e. {@link RoleMappingMetadata}). This is not enabled by default. */ -public final class ClusterStateRoleMapper extends AbstractRoleMapperClearRealmCache implements ClusterStateListener { - +public class ClusterStateRoleMapper extends AbstractRoleMapperClearRealmCache implements ClusterStateListener { /** * This setting is never registered by the xpack security plugin - in order to disable the * cluster-state based role mapper another plugin must register it as a boolean setting @@ -81,13 +82,26 @@ public void clusterChanged(ClusterChangedEvent event) { } } - private Set getMappings() { + public boolean hasMapping(String name) { + if (enabled == false) { + return false; + } + return false == getMappings(Set.of(name)).isEmpty(); + } + + public Set getMappings() { + return getMappings(null); + } + + public Set getMappings(@Nullable Set names) { if (enabled == false) { return Set.of(); - } else { - final Set mappings = RoleMappingMetadata.getFromClusterState(clusterService.state()).getRoleMappings(); - logger.trace("Retrieved [{}] mapping(s) from cluster state", mappings.size()); + } + final Set mappings = RoleMappingMetadata.getFromClusterState(clusterService.state()).getRoleMappings(); + logger.trace("Retrieved [{}] mapping(s) from cluster state", mappings.size()); + if (names == null || names.isEmpty()) { return mappings; } + return mappings.stream().filter(roleMapping -> names.contains(roleMapping.getName())).collect(Collectors.toSet()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java index 6e8698f095d32..010c19e8cc1b1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportGetRoleMappingsActionTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; @@ -19,21 +18,26 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsRequest; import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; -import org.hamcrest.Matchers; import org.junit.Before; -import java.util.Arrays; +import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; -import static org.hamcrest.Matchers.arrayContaining; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_METADATA_FLAG; +import static org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.notNullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anySet; import static org.mockito.ArgumentMatchers.nullable; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -42,8 +46,10 @@ public class TransportGetRoleMappingsActionTests extends ESTestCase { private NativeRoleMappingStore store; private TransportGetRoleMappingsAction action; - private AtomicReference> namesRef; - private List result; + private AtomicReference> nativeNamesRef; + private AtomicReference> clusterStateNamesRef; + private List nativeMappings; + private Set clusterStateMappings; @SuppressWarnings("unchecked") @Before @@ -58,68 +64,219 @@ public void setupMocks() { null, Collections.emptySet() ); - action = new TransportGetRoleMappingsAction(mock(ActionFilters.class), transportService, store); + ClusterStateRoleMapper clusterStateRoleMapper = mock(); + action = new TransportGetRoleMappingsAction(mock(ActionFilters.class), transportService, store, clusterStateRoleMapper); - namesRef = new AtomicReference<>(null); - result = Collections.emptyList(); + nativeNamesRef = new AtomicReference<>(null); + clusterStateNamesRef = new AtomicReference<>(null); + nativeMappings = Collections.emptyList(); + clusterStateMappings = Collections.emptySet(); + + doAnswer(invocation -> { + Object[] args = invocation.getArguments(); + assert args.length == 1; + clusterStateNamesRef.set((Set) args[0]); + return clusterStateMappings; + }).when(clusterStateRoleMapper).getMappings(anySet()); doAnswer(invocation -> { Object[] args = invocation.getArguments(); assert args.length == 2; - namesRef.set((Set) args[0]); + nativeNamesRef.set((Set) args[0]); ActionListener> listener = (ActionListener>) args[1]; - listener.onResponse(result); + 
listener.onResponse(nativeMappings); return null; }).when(store).getRoleMappings(nullable(Set.class), any(ActionListener.class)); } - public void testGetSingleRole() throws Exception { - final PlainActionFuture<GetRoleMappingsResponse> future = new PlainActionFuture<>(); - final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames("everyone"); + public void testGetSingleRoleMappingNativeOnly() throws Exception { + testGetMappings(List.of(mapping("everyone")), Collections.emptySet(), Set.of("everyone"), Set.of("everyone"), "everyone"); + } - final ExpressionRoleMapping mapping = mock(ExpressionRoleMapping.class); - result = Collections.singletonList(mapping); - action.doExecute(mock(Task.class), request, future); - assertThat(future.get(), notNullValue()); - assertThat(future.get().mappings(), arrayContaining(mapping)); - assertThat(namesRef.get(), containsInAnyOrder("everyone")); + public void testGetMultipleNamedRoleMappingsNativeOnly() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Collections.emptySet(), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); } - public void testGetMultipleNamedRoles() throws Exception { - final PlainActionFuture<GetRoleMappingsResponse> future = new PlainActionFuture<>(); - final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames("admin", "engineering", "sales", "finance"); + public void testGetAllRoleMappingsNativeOnly() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Collections.emptySet(), + Set.of(), + Set.of() + ); + } - final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); - result = Arrays.asList(mapping1, mapping2, mapping3); + public void testGetSingleRoleMappingClusterStateOnly() throws Exception { + testGetMappings(List.of(), Set.of(mapping("everyone")), Set.of("everyone"), Set.of("everyone"), "everyone"); + } - action.doExecute(mock(Task.class), request, future); + public void testGetMultipleNamedRoleMappingsClusterStateOnly() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); + } + + public void testGetAllRoleMappingsClusterStateOnly() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("admin"), mapping("engineering"), mapping("sales"), mapping("finance")), + Set.of(), + Set.of() + ); + } + + public void testGetSingleRoleMappingBoth() throws Exception { + testGetMappings(List.of(mapping("everyone")), Set.of(mapping("everyone")), Set.of("everyone"), Set.of("everyone"), "everyone"); + } + + public void testGetMultipleNamedRoleMappingsBoth() throws Exception { + testGetMappings( + List.of(mapping("admin"), mapping("engineering")), + Set.of(mapping("sales"), mapping("finance")), + Set.of("admin", "engineering", "sales", "finance"), + Set.of("admin", "engineering", "sales", "finance"), + "admin", + "engineering", + "sales", + "finance" + ); + } + + public void testGetAllRoleMappingsClusterBoth() throws Exception { + 
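(The read-only-suffix tests that follow assume the transport action strips READ_ONLY_ROLE_MAPPING_SUFFIX from a requested name before querying the cluster-state store, while the native store receives the raw names. A rough sketch of that routing under this assumption, not the actual TransportGetRoleMappingsAction code:

    // "everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX -> "everyone" for the cluster-state lookup;
    // near-miss names that lack the exact suffix pass through unchanged.
    Set<String> clusterStateNames = requestedNames.stream()
        .map(name -> name.endsWith(READ_ONLY_ROLE_MAPPING_SUFFIX)
            ? name.substring(0, name.length() - READ_ONLY_ROLE_MAPPING_SUFFIX.length())
            : name)
        .collect(Collectors.toSet());
)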
testGetMappings(List.of(mapping("admin"), mapping("engineering")), Set.of(mapping("admin"), mapping("sales")), Set.of(), Set.of()); + } + + public void testGetSingleRoleMappingQueryWithReadOnlySuffix() throws Exception { + testGetMappings( + List.of(), + Set.of(mapping("everyone")), + // suffix not stripped for native store query + Set.of("everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX), + // suffix is stripped for cluster state store + Set.of("everyone"), + "everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX + ); + + testGetMappings( + List.of(), + Set.of(mapping("everyoneread-only-operator-mapping")), + Set.of( + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ), + // suffix that is similar but not the same is not stripped + Set.of( + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ), + "everyoneread-only-operator-mapping", + "everyone-read-only-operator-mapping-", + "everyone-read-only-operator-mapping-more" + ); + + testGetMappings( + List.of(mapping("everyone")), + Set.of(mapping("everyone")), + // suffix not stripped for native store query + Set.of("everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX, "everyone"), + // suffix is stripped for cluster state store + Set.of("everyone"), + "everyone" + READ_ONLY_ROLE_MAPPING_SUFFIX, + "everyone" + ); + } + + public void testClusterStateRoleMappingWithFallbackNameOmitted() throws ExecutionException, InterruptedException { + testGetMappings( + List.of(), + Set.of(mapping("name_not_available_after_deserialization")), + Set.of(), + Set.of("name_not_available_after_deserialization"), + Set.of("name_not_available_after_deserialization"), + "name_not_available_after_deserialization" + ); - final GetRoleMappingsResponse response = future.get(); - assertThat(response, notNullValue()); - assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); - assertThat(namesRef.get(), containsInAnyOrder("admin", "engineering", "sales", "finance")); + testGetMappings( + List.of(mapping("name_not_available_after_deserialization")), + Set.of(mapping("name_not_available_after_deserialization")), + Set.of(), + Set.of("name_not_available_after_deserialization"), + Set.of("name_not_available_after_deserialization"), + "name_not_available_after_deserialization" + ); + } + + private void testGetMappings( + List returnedNativeMappings, + Set returnedClusterStateMappings, + Set expectedNativeNames, + Set expectedClusterStateNames, + String... names + ) throws InterruptedException, ExecutionException { + testGetMappings( + returnedNativeMappings, + returnedClusterStateMappings, + returnedClusterStateMappings.stream().map(this::expectedClusterStateMapping).collect(Collectors.toSet()), + expectedNativeNames, + expectedClusterStateNames, + names + ); } - public void testGetAllRoles() throws Exception { + private void testGetMappings( + List returnedNativeMappings, + Set returnedClusterStateMappings, + Set expectedClusterStateMappings, + Set expectedNativeNames, + Set expectedClusterStateNames, + String... 
names + ) throws InterruptedException, ExecutionException { final PlainActionFuture future = new PlainActionFuture<>(); final GetRoleMappingsRequest request = new GetRoleMappingsRequest(); - request.setNames(Strings.EMPTY_ARRAY); - - final ExpressionRoleMapping mapping1 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping2 = mock(ExpressionRoleMapping.class); - final ExpressionRoleMapping mapping3 = mock(ExpressionRoleMapping.class); - result = Arrays.asList(mapping1, mapping2, mapping3); + request.setNames(names); + nativeMappings = returnedNativeMappings; + clusterStateMappings = returnedClusterStateMappings; action.doExecute(mock(Task.class), request, future); + assertThat(future.get(), notNullValue()); + List combined = new ArrayList<>(returnedNativeMappings); + combined.addAll(expectedClusterStateMappings); + ExpressionRoleMapping[] actualMappings = future.get().mappings(); + assertThat(actualMappings, arrayContainingInAnyOrder(combined.toArray(new ExpressionRoleMapping[0]))); + assertThat(nativeNamesRef.get(), containsInAnyOrder(expectedNativeNames.toArray(new String[0]))); + assertThat(clusterStateNamesRef.get(), containsInAnyOrder(expectedClusterStateNames.toArray(new String[0]))); + } - final GetRoleMappingsResponse response = future.get(); - assertThat(response, notNullValue()); - assertThat(response.mappings(), arrayContainingInAnyOrder(mapping1, mapping2, mapping3)); - assertThat(namesRef.get(), Matchers.nullValue(Set.class)); + private ExpressionRoleMapping mapping(String name) { + return new ExpressionRoleMapping(name, null, null, null, Map.of(), true); } + private ExpressionRoleMapping expectedClusterStateMapping(ExpressionRoleMapping mapping) { + return new ExpressionRoleMapping( + mapping.getName() + READ_ONLY_ROLE_MAPPING_SUFFIX, + null, + null, + null, + Map.of(READ_ONLY_ROLE_MAPPING_METADATA_FLAG, true), + true + ); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 6f789a10a3a6c..6d1ac864d20fd 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; +import org.elasticsearch.xpack.security.authc.support.mapper.ClusterStateRoleMapper; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.junit.Before; @@ -29,18 +30,21 @@ import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.iterableWithSize; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class TransportPutRoleMappingActionTests extends ESTestCase { private NativeRoleMappingStore 
store; private TransportPutRoleMappingAction action; private AtomicReference<PutRoleMappingRequest> requestRef; + private ClusterStateRoleMapper clusterStateRoleMapper; @SuppressWarnings("unchecked") @Before @@ -55,7 +59,9 @@ public void setupMocks() { null, Collections.emptySet() ); - action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store); + clusterStateRoleMapper = mock(); + when(clusterStateRoleMapper.hasMapping(any())).thenReturn(false); + action = new TransportPutRoleMappingAction(mock(ActionFilters.class), transportService, store, clusterStateRoleMapper); requestRef = new AtomicReference<>(null); @@ -85,6 +91,41 @@ public void testPutValidMapping() throws Exception { assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); } + public void testValidMappingClashingClusterStateMapping() throws Exception { + final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*"))); + final PutRoleMappingResponse response = put("anarchy", expression, "superuser", Collections.singletonMap("dumb", true)); + when(clusterStateRoleMapper.hasMapping(any())).thenReturn(true); + + assertThat(response.isCreated(), equalTo(true)); + + final ExpressionRoleMapping mapping = requestRef.get().getMapping(); + assertThat(mapping.getExpression(), is(expression)); + assertThat(mapping.isEnabled(), equalTo(true)); + assertThat(mapping.getName(), equalTo("anarchy")); + assertThat(mapping.getRoles(), iterableWithSize(1)); + assertThat(mapping.getRoles(), contains("superuser")); + assertThat(mapping.getMetadata(), aMapWithSize(1)); + assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); + } + + public void testInvalidSuffix() { + final FieldExpression expression = new FieldExpression("username", Collections.singletonList(new FieldExpression.FieldValue("*"))); + String name = ExpressionRoleMapping.addReadOnlySuffix("anarchy"); + final var ex = expectThrows(IllegalArgumentException.class, () -> { + put(name, expression, "superuser", Collections.singletonMap("dumb", true)); + }); + assertThat( + ex.getMessage(), + containsString( + "Invalid mapping name [" + + name + + "]. 
[" + + ExpressionRoleMapping.READ_ONLY_ROLE_MAPPING_SUFFIX + + "] is not an allowed suffix" + ) + ); + } + private PutRoleMappingResponse put(String name, FieldExpression expression, String role, Map metadata) throws Exception { final PutRoleMappingRequest request = new PutRoleMappingRequest(); diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java index 8e2f713e6ed3e..d9cbbaafa1779 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexVisitorTests.java @@ -83,9 +83,10 @@ private void doTestGeometry(LongFunction h3ToGeometry, boolean hasArea visitor.reset(centerChild); reader.visit(visitor); if (hasArea) { - if (h3CrossesDateline && visitor.getLeftX() > visitor.getRightX()) { - // if both polygons crosses the dateline it cannot be inside due to the polygon splitting technique - assertEquals("failing h3: " + h3, GeoRelation.QUERY_CROSSES, visitor.relation()); + if (h3CrossesDateline) { + // if the h3 crosses the dateline, we might get CROSSES due to the polygon splitting technique. We can't + // be sure which one is the correct one, so we just check that it is not DISJOINT + assertNotSame("failing h3: " + h3, GeoRelation.QUERY_DISJOINT, visitor.relation()); } else { assertEquals("failing h3: " + h3, GeoRelation.QUERY_INSIDE, visitor.relation()); } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml index 7d1a4e123299b..b51bbdc4d2f87 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/60_usage.yml @@ -23,7 +23,86 @@ setup: type: integer --- -"Basic ESQL usage output (telemetry)": +"Basic ESQL usage output (telemetry) snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ snapshot_test_for_telemetry ] + reason: "Test that should only be executed on snapshot versions" + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - length: { esql.features: 15 } + - set: {esql.features.dissect: dissect_counter} + - set: {esql.features.drop: drop_counter} + - set: {esql.features.eval: eval_counter} + - set: {esql.features.enrich: enrich_counter} + - set: {esql.features.from: from_counter} + - set: {esql.features.grok: grok_counter} + - set: {esql.features.keep: keep_counter} + - set: {esql.features.limit: limit_counter} + - set: {esql.features.mv_expand: mv_expand_counter} + - set: {esql.features.rename: rename_counter} + - set: {esql.features.row: row_counter} + - set: {esql.features.show: show_counter} + - set: {esql.features.sort: sort_counter} + - set: {esql.features.stats: stats_counter} + - set: {esql.features.where: where_counter} + - length: { esql.queries: 3 } + - set: {esql.queries.rest.total: rest_total_counter} + - set: {esql.queries.rest.failed: rest_failed_counter} + - set: {esql.queries.kibana.total: kibana_total_counter} + - set: {esql.queries.kibana.failed: kibana_failed_counter} + - set: 
{esql.queries._all.total: all_total_counter} + - set: {esql.queries._all.failed: all_failed_counter} + - set: {esql.functions.max: functions_max} + - set: {esql.functions.min: functions_min} + - set: {esql.functions.cos: functions_cos} + - set: {esql.functions.to_long: functions_to_long} + - set: {esql.functions.coalesce: functions_coalesce} + + - do: + esql.query: + body: + query: 'from test | where data > 2 and to_long(data) > 2 | sort count desc | limit 5 | stats m = max(data)' + + - do: {xpack.usage: {}} + - match: { esql.available: true } + - match: { esql.enabled: true } + - match: {esql.features.dissect: $dissect_counter} + - match: {esql.features.eval: $eval_counter} + - match: {esql.features.grok: $grok_counter} + - gt: {esql.features.limit: $limit_counter} + - gt: {esql.features.sort: $sort_counter} + - gt: {esql.features.stats: $stats_counter} + - gt: {esql.features.where: $where_counter} + - gt: {esql.queries.rest.total: $rest_total_counter} + - match: {esql.queries.rest.failed: $rest_failed_counter} + - match: {esql.queries.kibana.total: $kibana_total_counter} + - match: {esql.queries.kibana.failed: $kibana_failed_counter} + - gt: {esql.queries._all.total: $all_total_counter} + - match: {esql.queries._all.failed: $all_failed_counter} + - gt: {esql.functions.max: $functions_max} + - match: {esql.functions.min: $functions_min} + - match: {esql.functions.cos: $functions_cos} + - gt: {esql.functions.to_long: $functions_to_long} + - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 117} # check the "sister" test below for a likely update to the same esql.functions length check + +--- +"Basic ESQL usage output (telemetry) non-snapshot version": + - requires: + test_runner_features: [capabilities] + capabilities: + - method: POST + path: /_query + parameters: [] + capabilities: [ non_snapshot_test_for_telemetry ] + reason: "Test that should only be executed on release versions" - do: {xpack.usage: {}} - match: { esql.available: true } @@ -83,3 +162,4 @@ setup: - match: {esql.functions.cos: $functions_cos} - gt: {esql.functions.to_long: $functions_to_long} - match: {esql.functions.coalesce: $functions_coalesce} + - length: {esql.functions: 115} # check the "sister" test above for a likely update to the same esql.functions length check diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml new file mode 100644 index 0000000000000..f639b4f8f1a77 --- /dev/null +++ b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/settings_endpoints/10_watcher_settings.yml @@ -0,0 +1,104 @@ +--- +setup: + - do: + cluster.health: + wait_for_status: yellow + - do: + watcher.put_watch: + id: "my_watch" + body: > + { + "trigger": { + "schedule": { + "hourly": { + "minute": [ 0, 5 ] + } + } + }, + "input": { + "simple": { + "payload": { + "send": "yes" + } + } + }, + "condition": { + "always": {} + }, + "actions": { + "test_index": { + "index": { + "index": "test" + } + } + } + } + +--- +"Test update and get watch settings api": + - do: + watcher.get_settings: { } + + - match: { index.auto_expand_replicas: "0-1" } + - match: { index.number_of_replicas: "0" } + + - do: + watcher.update_settings: + body: + index.auto_expand_replicas: "0-all" + + - do: + watcher.get_settings: { } + + - match: { index.auto_expand_replicas: "0-all" } + - 
is_false: index.routing.allocation.include._tier_preference + + - do: + watcher.update_settings: + body: + index.auto_expand_replicas: null + index.number_of_replicas: 1 + + - do: + watcher.get_settings: { } + + - match: { index.number_of_replicas: "1" } +--- +"Test disallowed setting name throws error": + - requires: + test_runner_features: regex + - do: + watcher.update_settings: + body: + index.disallowed_setting: "some_invalid_value" + catch: bad_request + - match: + error: + type: "action_request_validation_exception" + reason: '/illegal settings\: \[index.disallowed_setting\].*/' +--- +"Test allowed prefix setting name": + - do: + watcher.update_settings: + body: + index.routing.allocation.include.role: "watcher" + index.routing.allocation.exclude.role: "noWatcher" + index.routing.allocation.require.role: "mustWatcher" + - do: + watcher.get_settings: { } + - match: { index.routing.allocation.include.role: "watcher" } + - match: { index.routing.allocation.exclude.role: "noWatcher" } + - match: { index.routing.allocation.require.role: "mustWatcher" } +--- +"Test explicitly disallowed prefix setting name throws error": + - requires: + test_runner_features: regex + - do: + watcher.update_settings: + body: + index.routing.allocation.include.disallowed_prefix: "some_invalid_value" + catch: bad_request + - match: + error: + type: "action_request_validation_exception" + reason: '/illegal settings\: \[index.routing.allocation.include.disallowed_prefix\].*/' diff --git a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml b/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml deleted file mode 100644 index d7478d643a98a..0000000000000 --- a/x-pack/plugin/watcher/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/watcher/update_settings/10_update_watcher_settings.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -setup: - - do: - cluster.health: - wait_for_status: yellow - ---- -"Test update and get watch settings api": - - do: - watcher.put_watch: - id: "my_watch" - body: > - { - "trigger": { - "schedule": { - "hourly": { - "minute": [ 0, 5 ] - } - } - }, - "input": { - "simple": { - "payload": { - "send": "yes" - } - } - }, - "condition": { - "always": {} - }, - "actions": { - "test_index": { - "index": { - "index": "test" - } - } - } - } - - match: { _id: "my_watch" } - - - do: - watcher.get_settings: {} - - - match: { index.auto_expand_replicas: "0-1" } - - match: { index.number_of_replicas: "0" } - - - do: - watcher.update_settings: - body: - index.auto_expand_replicas: "0-all" - - - do: - watcher.get_settings: {} - - - match: { index.auto_expand_replicas: "0-all" } - - - do: - watcher.update_settings: - body: - index.auto_expand_replicas: null - index.number_of_replicas: 1 - - - do: - watcher.get_settings: {} - - - match: { index.number_of_replicas: "1" } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java index 29349735afcd2..2962bffd68b66 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportGetWatcherSettingsAction.java @@ -23,8 +23,10 @@ import 
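(The filterSettableSettings rewrite in the hunk below keeps a setting if it is an explicitly allowed key or starts with an allowed prefix, and drops anything explicitly denied. An illustrative sketch, assuming _tier_preference sits in EXPLICITLY_DENIED_SETTINGS as the is_false assertion above suggests:

    Settings filtered = filterSettableSettings(
        Settings.builder()
            .put("index.auto_expand_replicas", "0-all") // allowed key: kept
            .put("index.routing.allocation.include.role", "watcher") // allowed prefix: kept
            .put("index.routing.allocation.include._tier_preference", "data_content") // denied: dropped (assumption)
            .build()
    );
)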
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.watcher.transport.actions.put.GetWatcherSettingsAction; -import org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.ALLOWED_SETTINGS_PREFIXES; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.ALLOWED_SETTING_KEYS; +import static org.elasticsearch.xpack.core.watcher.transport.actions.put.UpdateWatcherSettingsAction.EXPLICITLY_DENIED_SETTINGS; import static org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_NAME; import static org.elasticsearch.xpack.watcher.transport.actions.TransportUpdateWatcherSettingsAction.WATCHER_INDEX_REQUEST; @@ -73,11 +75,14 @@ protected void masterOperation( */ private static Settings filterSettableSettings(Settings settings) { Settings.Builder builder = Settings.builder(); - for (String settingName : UpdateWatcherSettingsAction.ALLOWED_SETTING_KEYS) { - if (settings.hasValue(settingName)) { - builder.put(settingName, settings.get(settingName)); - } - } + settings.keySet() + .stream() + .filter( + setting -> (ALLOWED_SETTING_KEYS.contains(setting) + || ALLOWED_SETTINGS_PREFIXES.stream().anyMatch(prefix -> setting.startsWith(prefix + "."))) + && EXPLICITLY_DENIED_SETTINGS.contains(setting) == false + ) + .forEach(setting -> builder.put(setting, settings.get(setting))); return builder.build(); } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java index ced131640f0ee..cc8d0edf37014 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleTriggerEngine.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; +import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleRegistry; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; @@ -32,6 +34,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -67,7 +70,11 @@ public synchronized void start(Collection<Watch> jobs) { Map<String, ActiveSchedule> startingSchedules = Maps.newMapWithExpectedSize(jobs.size()); for (Watch job : jobs) { if (job.trigger() instanceof ScheduleTrigger trigger) { - startingSchedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); + if (trigger.getSchedule() instanceof IntervalSchedule) { + startingSchedules.put(job.id(), new ActiveSchedule(job.id(), trigger.getSchedule(), calculateLastStartTime(job))); + } else { + startingSchedules.put(job.id(), new 
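// Resume semantics, in short: calculateLastStartTime(job) prefers status().lastChecked(),
// then falls back to the activation-state timestamp, then to clock.millis(). So a 1s-interval
// watch last checked 500ms before a restart fires ~500ms after start, not a full interval later.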
ActiveSchedule(job.id(), trigger.getSchedule(), startTime)); + } } } // why are we calling putAll() here instead of assigning a brand @@ -108,10 +115,39 @@ public void add(Watch watch) { // watcher indexing listener // this also means that updating an existing watch would not retrigger the schedule time, if it remains the same schedule if (currentSchedule == null || currentSchedule.schedule.equals(trigger.getSchedule()) == false) { - schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); + if (trigger.getSchedule() instanceof IntervalSchedule) { + schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), calculateLastStartTime(watch))); + } else { + schedules.put(watch.id(), new ActiveSchedule(watch.id(), trigger.getSchedule(), clock.millis())); + } + } } + /** + * Attempts to calculate the epoch millis of the last time the watch was checked. If the watch has never been checked, the timestamp of + * the last state change is used. If the watch has never been checked and has never been in an active state, the current time is used. + * @param job the watch to calculate the last start time for + * @return the epoch millis of the last time the watch was checked or now + */ + private long calculateLastStartTime(Watch job) { + var lastChecked = Optional.ofNullable(job) + .map(Watch::status) + .map(WatchStatus::lastChecked) + .map(ZonedDateTime::toInstant) + .map(Instant::toEpochMilli); + + return lastChecked.orElseGet( + () -> Optional.ofNullable(job) + .map(Watch::status) + .map(WatchStatus::state) + .map(WatchStatus.State::getTimestamp) + .map(ZonedDateTime::toInstant) + .map(Instant::toEpochMilli) + .orElse(clock.millis()) + ); + } + @Override public boolean remove(String jobId) { logger.debug("Removing watch [{}] from engine (engine is running: {})", jobId, isRunning.get()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java index 8b7cfa75f9229..9a12b8f394eb2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/schedule/engine/TickerScheduleEngineTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.core.watcher.trigger.TriggerEvent; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; +import org.elasticsearch.xpack.core.watcher.watch.WatchStatus; import org.elasticsearch.xpack.watcher.condition.InternalAlwaysCondition; import org.elasticsearch.xpack.watcher.input.none.ExecutableNoneInput; import org.elasticsearch.xpack.watcher.trigger.schedule.Schedule; @@ -283,6 +284,244 @@ public void testAddOnlyWithNewSchedule() { assertThat(engine.getSchedules().get("_id"), not(is(activeSchedule))); } + /** + * This test verifies that a watch with a valid lastCheckedTime executes before the interval time to ensure the job resumes waiting + * from the same point it left off before the reallocation / restart + */ + public void testWatchWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new 
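// The WatchStatus below (version -1, no state) carries only lastChecked = now - 500ms, so with
// a 1s interval the first fire is expected ~500ms after engine.start, per the resume logic above.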
ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus(-1L, null, null, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC), null, null, null), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var watches = Collections.singletonList(watch); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(watches); + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch without a lastCheckedTime but with a valid activationTime executes before the interval time to + * ensure the job resumes waiting from the same point it left off before the reallocation / restart + */ + public void testWatchWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus( + -1L, + new WatchStatus.State(true, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC)), + null, + null, + null, + null, + null + ), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var watches = Collections.singletonList(watch); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(watches); + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch added after service start with a lastCheckedTime executes before the interval time to ensure the job + * resumes waiting from the same point it left off before the reallocation / restart + */ + public void testAddWithLastCheckedTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new 
WatchStatus(-1L, null, null, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC), null, null, null), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(Collections.emptyList()); + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + engine.add(watch); + + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + + /** + * This test verifies that a watch added after service start without a lastCheckedTime but with a valid activationTime executes before + * the interval time to ensure the job resumes waiting from the same point it left off before the reallocation / restart + */ + public void testAddWithNoLastCheckedTimeButHasActivationTimeExecutesBeforeInitialInterval() throws Exception { + final var firstLatch = new CountDownLatch(1); + final var secondLatch = new CountDownLatch(1); + + Watch watch = new Watch( + "watch", + new ScheduleTrigger(interval("1s")), + new ExecutableNoneInput(), + InternalAlwaysCondition.INSTANCE, + null, + null, + Collections.emptyList(), + null, + new WatchStatus( + -1L, + new WatchStatus.State(true, clock.instant().minusMillis(500).atZone(ZoneOffset.UTC)), + null, + null, + null, + null, + null + ), + SequenceNumbers.UNASSIGNED_SEQ_NO, + SequenceNumbers.UNASSIGNED_PRIMARY_TERM + ); + + var runCount = new AtomicInteger(0); + + engine.register(events -> { + for (TriggerEvent ignored : events) { + if (runCount.get() == 0) { + logger.info("job first fire"); + firstLatch.countDown(); + } else { + logger.info("job second fire"); + secondLatch.countDown(); + } + runCount.incrementAndGet(); + } + }); + + engine.start(Collections.emptyList()); + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + engine.add(watch); + + advanceClockIfNeeded(clock.instant().plusMillis(510).atZone(ZoneOffset.UTC)); + if (firstLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + advanceClockIfNeeded(clock.instant().plusMillis(1100).atZone(ZoneOffset.UTC)); + if (secondLatch.await(3, TimeUnit.SECONDS) == false) { + fail("waiting too long for all watches to be triggered"); + } + + assertThat(runCount.get(), is(2)); + + engine.stop(); + } + private Watch createWatch(String name, Schedule schedule) { return new Watch( name,