diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 42af0d79b34da..3d20e3fb73b8e 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -74,3 +74,8 @@ if [[ "${USE_SNYK_CREDENTIALS:-}" == "true" ]]; then SNYK_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/migrated/snyk) export SNYK_TOKEN fi + +if [[ "$BUILDKITE_AGENT_META_DATA_PROVIDER" != *"k8s"* ]]; then + # Run in the background, while the job continues + nohup .buildkite/scripts/setup-monitoring.sh > /dev/null 2>&1 & +fi diff --git a/.buildkite/scripts/setup-monitoring.sh b/.buildkite/scripts/setup-monitoring.sh new file mode 100755 index 0000000000000..95a5b90effea2 --- /dev/null +++ b/.buildkite/scripts/setup-monitoring.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -euo pipefail + +ELASTIC_AGENT_URL=$(vault read -field=url secret/ci/elastic-elasticsearch/elastic-agent-token) +ELASTIC_AGENT_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/elastic-agent-token) + +if [[ ! -d /opt/elastic-agent ]]; then + sudo mkdir /opt/elastic-agent + sudo chown -R buildkite-agent:buildkite-agent /opt/elastic-agent + cd /opt/elastic-agent + + archive=elastic-agent-8.10.1-linux-x86_64.tar.gz + if [ "$(uname -m)" = "arm64" ] || [ "$(uname -m)" = "aarch64" ]; then + archive=elastic-agent-8.10.1-linux-arm64.tar.gz + fi + + curl -L -O "https://artifacts.elastic.co/downloads/beats/elastic-agent/$archive" + + tar xzf "$archive" --directory=. 
--strip-components=1 +fi + +cd /opt/elastic-agent +sudo ./elastic-agent install -f --url="$ELASTIC_AGENT_URL" --enrollment-token="$ELASTIC_AGENT_TOKEN" diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 9daa5c24f3bd4..a9338d5660b28 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -152,7 +152,7 @@ public void setUp() throws Exception { .metadata(metadata) .routingTable(routingTable) .nodes(nb) - .compatibilityVersions(compatibilityVersions) + .nodeIdsToCompatibilityVersions(compatibilityVersions) .build(); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index e9438eabadbb6..088f8290e713f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -67,6 +67,7 @@ private static ListMultimap, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":qa:remote-clusters"); map.put(LegacyRestTestBasePlugin.class, ":qa:repository-multi-version"); map.put(LegacyRestTestBasePlugin.class, ":qa:rolling-upgrade"); + map.put(LegacyRestTestBasePlugin.class, ":qa:rolling-upgrade-legacy"); map.put(LegacyRestTestBasePlugin.class, ":qa:smoke-test-http"); map.put(LegacyRestTestBasePlugin.class, ":qa:smoke-test-ingest-disabled"); map.put(LegacyRestTestBasePlugin.class, ":qa:smoke-test-ingest-with-all-dependencies"); diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml 
b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index 211faf973b772..9f074513b6d4e 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -32,7 +32,7 @@ - + diff --git a/docs/changelog/99091.yaml b/docs/changelog/99091.yaml new file mode 100644 index 0000000000000..2c7be19b161ba --- /dev/null +++ b/docs/changelog/99091.yaml @@ -0,0 +1,5 @@ +pr: 99091 +summary: Add flamegraph API +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/99193.yaml b/docs/changelog/99193.yaml new file mode 100644 index 0000000000000..9db646dc80435 --- /dev/null +++ b/docs/changelog/99193.yaml @@ -0,0 +1,5 @@ +pr: 99193 +summary: Wait for cluster state in recovery +area: Recovery +type: enhancement +issues: [] diff --git a/docs/changelog/99278.yaml b/docs/changelog/99278.yaml new file mode 100644 index 0000000000000..f2788a00e6369 --- /dev/null +++ b/docs/changelog/99278.yaml @@ -0,0 +1,5 @@ +pr: 99278 +summary: Support rotating the JWT shared secret +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml new file mode 100644 index 0000000000000..df4c5a7f78199 --- /dev/null +++ b/docs/changelog/99432.yaml @@ -0,0 +1,5 @@ +pr: 99432 +summary: "ESQL: Enable arithmetics for durations and periods" +area: ES|QL +type: enhancement +issues: [99293] diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml new file mode 100644 index 0000000000000..ea23481069833 --- /dev/null +++ b/docs/changelog/99474.yaml @@ -0,0 +1,5 @@ +pr: 99474 +summary: Add `java.net.NetPermission` to APM module's permissions +area: Infra/Core +type: bug +issues: [] diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml new file mode 100644 index 0000000000000..5e53e8782e08c --- /dev/null +++ b/docs/changelog/99555.yaml @@ -0,0 +1,5 @@ +pr: 99555 +summary: Use mappings version to retrieve system 
index mappings at creation time +area: Infra/Core +type: enhancement +issues: [] diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml new file mode 100644 index 0000000000000..7cbb53376fdf0 --- /dev/null +++ b/docs/changelog/99588.yaml @@ -0,0 +1,6 @@ +pr: 99588 +summary: Make ESQL more resilient to non-indexed fields +area: ES|QL +type: bug +issues: + - 99506 diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml new file mode 100644 index 0000000000000..84abdf6418dc2 --- /dev/null +++ b/docs/changelog/99627.yaml @@ -0,0 +1,5 @@ +pr: 99627 +summary: Fix thread context in `getRepositoryData` +area: Snapshot/Restore +type: bug +issues: [] diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml new file mode 100644 index 0000000000000..c74f7380bd93a --- /dev/null +++ b/docs/changelog/99641.yaml @@ -0,0 +1,5 @@ +pr: 99641 +summary: Chunk the cluster allocation explain response +area: Network +type: enhancement +issues: [97803] diff --git a/docs/changelog/99644.yaml b/docs/changelog/99644.yaml new file mode 100644 index 0000000000000..10c10448c074c --- /dev/null +++ b/docs/changelog/99644.yaml @@ -0,0 +1,6 @@ +pr: 99644 +summary: Add links to docs from failing bootstrap checks +area: Infra/Node Lifecycle +type: enhancement +issues: [99614] + diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml new file mode 100644 index 0000000000000..3d1e76ec47aa3 --- /dev/null +++ b/docs/changelog/99655.yaml @@ -0,0 +1,5 @@ +pr: 99655 +summary: "[Profiling] Allow to wait until resources created" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/99677.yaml b/docs/changelog/99677.yaml new file mode 100644 index 0000000000000..04c1c28cf2e12 --- /dev/null +++ b/docs/changelog/99677.yaml @@ -0,0 +1,5 @@ +pr: 99677 +summary: Using 1 MB chunks for elser model storage +area: Machine Learning +type: bug +issues: [ ] diff --git a/docs/changelog/99682.yaml b/docs/changelog/99682.yaml new file mode 100644 index 
0000000000000..48e99a5145674 --- /dev/null +++ b/docs/changelog/99682.yaml @@ -0,0 +1,5 @@ +pr: 99682 +summary: Increase the max vector dims to 4096 +area: Vector Search +type: enhancement +issues: [] diff --git a/docs/changelog/99685.yaml b/docs/changelog/99685.yaml new file mode 100644 index 0000000000000..43dac2abbb312 --- /dev/null +++ b/docs/changelog/99685.yaml @@ -0,0 +1,5 @@ +pr: 99685 +summary: Fix `advanceExact` for doc values from sources +area: Search +type: bug +issues: [] diff --git a/docs/changelog/99717.yaml b/docs/changelog/99717.yaml new file mode 100644 index 0000000000000..db48c69ed68a2 --- /dev/null +++ b/docs/changelog/99717.yaml @@ -0,0 +1,5 @@ +pr: 99717 +summary: Treating watcher webhook response header names as case-insensitive +area: Watcher +type: bug +issues: [] diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 48b055f4e0fa2..7c3cd8716dfe3 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -1142,6 +1142,7 @@ You cannot specify both `reindex.ssl.certificate_authorities` and `reindex.ssl.truststore.password`:: The password to the truststore (`reindex.ssl.truststore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.truststore.secure_password` instead. This setting cannot be used with `reindex.ssl.truststore.secure_password`. `reindex.ssl.truststore.secure_password` (<>):: @@ -1175,6 +1176,7 @@ You cannot specify both `reindex.ssl.key` and `reindex.ssl.keystore.path`. `reindex.ssl.key_passphrase`:: Specifies the passphrase to decrypt the PEM encoded private key (`reindex.ssl.key`) if it is encrypted. +deprecated:[7.17.0] Prefer `reindex.ssl.secure_key_passphrase` instead. Cannot be used with `reindex.ssl.secure_key_passphrase`. `reindex.ssl.secure_key_passphrase` (<>):: @@ -1194,8 +1196,9 @@ If the keystore path ends in ".p12", ".pfx" or "pkcs12", this setting defaults to `PKCS12`. Otherwise, it defaults to `jks`. 
`reindex.ssl.keystore.password`:: -The password to the keystore (`reindex.ssl.keystore.path`). This setting cannot be used -with `reindex.ssl.keystore.secure_password`. +The password to the keystore (`reindex.ssl.keystore.path`). +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_password` instead. +This setting cannot be used with `reindex.ssl.keystore.secure_password`. `reindex.ssl.keystore.secure_password` (<>):: The password to the keystore (`reindex.ssl.keystore.path`). @@ -1203,8 +1206,9 @@ This setting cannot be used with `reindex.ssl.keystore.password`. `reindex.ssl.keystore.key_password`:: The password for the key in the keystore (`reindex.ssl.keystore.path`). -Defaults to the keystore password. This setting cannot be used with -`reindex.ssl.keystore.secure_key_password`. +Defaults to the keystore password. +deprecated:[7.17.0] Prefer `reindex.ssl.keystore.secure_key_password` instead. +This setting cannot be used with `reindex.ssl.keystore.secure_key_password`. `reindex.ssl.keystore.secure_key_password` (<>):: The password for the key in the keystore (`reindex.ssl.keystore.path`). diff --git a/docs/reference/esql/functions/date_format.asciidoc b/docs/reference/esql/functions/date_format.asciidoc index 3f61e07221111..40bf024a3469d 100644 --- a/docs/reference/esql/functions/date_format.asciidoc +++ b/docs/reference/esql/functions/date_format.asciidoc @@ -7,5 +7,5 @@ is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. 
---- FROM employees | KEEP first_name, last_name, hire_date -| EVAL hired = DATE_FORMAT(hire_date, "YYYY-MM-dd") +| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date) ---- diff --git a/docs/reference/esql/functions/types/date_extract.asciidoc b/docs/reference/esql/functions/types/date_extract.asciidoc index 57a83810d9b7c..9963c85b2af85 100644 --- a/docs/reference/esql/functions/types/date_extract.asciidoc +++ b/docs/reference/esql/functions/types/date_extract.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== arg1 | arg2 | result -datetime | keyword | long +keyword | datetime | long |=== diff --git a/docs/reference/ingest/processors/reroute.asciidoc b/docs/reference/ingest/processors/reroute.asciidoc index eb7eb211cd62f..482ff3b1cc116 100644 --- a/docs/reference/ingest/processors/reroute.asciidoc +++ b/docs/reference/ingest/processors/reroute.asciidoc @@ -4,8 +4,6 @@ Reroute ++++ -experimental::[] - The `reroute` processor allows to route a document to another target index or data stream. It has two main modes: diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index fb50ee36644a6..76c9313374b13 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -62,7 +62,7 @@ In many cases, a brute-force kNN search is not efficient enough. For this reason, the `dense_vector` type supports indexing vectors into a specialized data structure to support fast kNN retrieval through the <> in the search API -Unmapped array fields of float elements with size between 128 and 2048 are dynamically mapped as `dense_vector` with a default similariy of `cosine`. +Unmapped array fields of float elements with size between 128 and 4096 are dynamically mapped as `dense_vector` with a default similarity of `cosine`. You can override the default similarity by explicitly mapping the field as `dense_vector` with the desired similarity. 
Indexing is enabled by default for dense vector fields. @@ -132,7 +132,7 @@ integer values between -128 to 127, inclusive for both indexing and searching. `dims`:: (Optional, integer) -Number of vector dimensions. Can't exceed `2048`. If `dims` is not specified, +Number of vector dimensions. Can't exceed `4096`. If `dims` is not specified, it will be set to the length of the first vector added to the field. `index`:: diff --git a/docs/reference/settings/common-defs.asciidoc b/docs/reference/settings/common-defs.asciidoc index 33e736c70046e..a369b3870d9c4 100644 --- a/docs/reference/settings/common-defs.asciidoc +++ b/docs/reference/settings/common-defs.asciidoc @@ -57,20 +57,21 @@ end::ssl-key-pem[] tag::ssl-key-passphrase[] The passphrase that is used to decrypt the private key. Since the key might not -be encrypted, this value is optional. +be encrypted, this value is optional. deprecated:[7.17.0] Prefer `ssl.secure_key_passphrase` instead. + You cannot use this setting and `ssl.secure_key_passphrase` at the same time. end::ssl-key-passphrase[] tag::ssl-keystore-key-password[] The password for the key in the keystore. The default is the keystore password. +deprecated:[7.17.0] Prefer `ssl.keystore.secure_key_password` instead. + You cannot use this setting and `ssl.keystore.secure_password` at the same time. //TBD: You cannot use this setting and `ssl.keystore.secure_key_password` at the same time. end::ssl-keystore-key-password[] tag::ssl-keystore-password[] -The password for the keystore. +The password for the keystore. deprecated:[7.17.0] Prefer `ssl.keystore.secure_password` instead. //TBD: You cannot use this setting and `ssl.keystore.secure_password` at the same time. end::ssl-keystore-password[] @@ -122,7 +123,7 @@ or `SSLv3`. See <>. end::ssl-supported-protocols[] tag::ssl-truststore-password[] -The password for the truststore. +The password for the truststore. deprecated:[7.17.0] Prefer `ssl.truststore.secure_password` instead. 
+ You cannot use this setting and `ssl.truststore.secure_password` at the same time. @@ -160,7 +161,7 @@ Authority (CA); has a `hostname` or IP address that matches the names within the certificate. `certificate`:: -Validates the provided certificate and verifies that it's signed by a +Validates the provided certificate and verifies that it's signed by a trusted authority (CA), but doesn't check the certificate `hostname`. `none`:: @@ -173,4 +174,4 @@ resolve TLS errors. ===== + Defaults to `full`. -end::ssl-verification-mode-values[] \ No newline at end of file +end::ssl-verification-mode-values[] diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index 2ff94e6885226..a10116005a6b5 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -49,7 +49,7 @@ and {ls} is ignored. [[xpack-monitoring-collection-interval]] // tag::monitoring-collection-interval-tag[] `xpack.monitoring.collection.interval` {ess-icon}:: -deprecated:[6.3.0,"Use `xpack.monitoring.collection.enabled` set to `false` instead."] +deprecated:[6.3.0,"Use `xpack.monitoring.collection.enabled` set to `false` instead."] (<>) Setting to `-1` to disable data collection is no longer supported beginning with 7.0.0. + @@ -60,7 +60,7 @@ option in `kibana.yml` to the same value. `xpack.monitoring.elasticsearch.collection.enabled`:: (<>) deprecated:[7.16.0] Controls whether statistics about your -{es} cluster should be collected. Defaults to `true`. This is different from +{es} cluster should be collected. Defaults to `true`. This is different from `xpack.monitoring.collection.enabled`, which allows you to enable or disable all monitoring collection. 
However, this setting simply disables the collection of {es} data while still allowing other data (e.g., {kib}, {ls}, Beats, or APM @@ -285,18 +285,18 @@ For example: `["elasticsearch_version_mismatch","xpack_license_expiration"]`. You can configure the following TLS/SSL settings. +{ssl-prefix}.ssl.supported_protocols+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-supported-protocols] ifdef::verifies[] +{ssl-prefix}.ssl.verification_mode+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] Controls the verification of certificates. include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-verification-mode-values] endif::verifies[] +{ssl-prefix}.ssl.cipher_suites+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-cipher-suites-values] [#{ssl-context}-tls-ssl-key-trusted-certificate-settings] @@ -318,19 +318,19 @@ When using PEM encoded files, use the following settings: include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-key-pem] +{ssl-prefix}.ssl.key_passphrase+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-key-passphrase] +{ssl-prefix}.ssl.secure_key_passphrase+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-secure-key-passphrase] +{ssl-prefix}.ssl.certificate+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-certificate] +{ssl-prefix}.ssl.certificate_authorities+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-certificate-authorities] ===== Java keystore files @@ -339,35 +339,35 @@ When using Java keystore files (JKS), which contain the private key, certificate and certificates that should be trusted, use the following settings: 
+{ssl-prefix}.ssl.keystore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] +{ssl-prefix}.ssl.keystore.password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +{ssl-prefix}.ssl.keystore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] +{ssl-prefix}.ssl.keystore.key_password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-key-password] +{ssl-prefix}.ssl.keystore.secure_key_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-key-password] +{ssl-prefix}.ssl.truststore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-path] +{ssl-prefix}.ssl.truststore.password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-password] +{ssl-prefix}.ssl.truststore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-password] [#{ssl-context}-pkcs12-files] @@ -379,43 +379,43 @@ that contain the private key, certificate and certificates that should be truste PKCS#12 files are configured in the same way as Java keystore files: +{ssl-prefix}.ssl.keystore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] +{ssl-prefix}.ssl.keystore.type+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-type-pkcs12] +{ssl-prefix}.ssl.keystore.password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] 
include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +{ssl-prefix}.ssl.keystore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] +{ssl-prefix}.ssl.keystore.key_password+:: -(<>) deprecated:[7.16.0] +(<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-key-password] +{ssl-prefix}.ssl.keystore.secure_key_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-key-password] +{ssl-prefix}.ssl.truststore.path+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-path] +{ssl-prefix}.ssl.truststore.type+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] Set this to `PKCS12` to indicate that the truststore is a PKCS#12 file. //TBD:Should this use the ssl-truststore-type definition and default values? +{ssl-prefix}.ssl.truststore.password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-password] +{ssl-prefix}.ssl.truststore.secure_password+:: -(<>) deprecated:[7.16.0] +(<>) deprecated:[7.16.0] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-truststore-secure-password] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index bffd99a3ab5d8..f1949266f07c5 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -2266,11 +2266,20 @@ restricts which ones are allowed to submit those JWTs to {es}. // tag::jwt-client-authentication-shared-secret-tag[] `client_authentication.shared_secret` {ess-icon}:: -(<>) +(<>, <>) Secret value string for client authentication. Required if `client_authentication.type` is `shared_secret`. 
// end::jwt-client-authentication-shared-secret-tag[] +// tag::jwt-client-authentication-rotation-grace-period-tag[] +`client_authentication.rotation_grace_period`:: +(<>) +Sets the grace period for how long after rotating the `client_authentication.shared_secret` +is valid. `client_authentication.shared_secret` can be rotated by updating the +keystore then calling the <>. +Defaults to `1m`. +// end::jwt-client-authentication-rotation-grace-period-tag[] + // tag::jwt-http-connect-timeout-tag[] `http.connect_timeout` {ess-icon}:: (<>) @@ -2414,6 +2423,12 @@ include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-path] include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-type-pkcs12] // end::jwt-ssl-keystore-type-tag[] +// tag::jwt-ssl-keystore-password-tag[] +`ssl.keystore.password` {ess-icon}:: +(<>) +include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-password] +// end::jwt-ssl-keystore-password-tag[] + `ssl.keystore.secure_password`:: (<>) include::{es-repo-dir}/settings/common-defs.asciidoc[tag=ssl-keystore-secure-password] diff --git a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc index 2d5b4f86e3c77..48e9f802e13f8 100644 --- a/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc +++ b/docs/reference/troubleshooting/common-issues/mapping-explosion.asciidoc @@ -1,70 +1,63 @@ [[mapping-explosion]] === Mapping explosion -{es}'s search and {kibana-ref}/discover.html[{kib}'s discover] Javascript rendering are -dependent on the search's backing indices total amount of -<>, of all mapping depths. When this total -amount is too high or is exponentially climbing, we refer to it as -experiencing mapping explosion. 
Field counts going this high are uncommon -and usually suggest an upstream document formatting issue as -link:https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion[shown in this blog]. +{es}'s search and {kibana-ref}/discover.html[{kib}'s discover] Javascript rendering are +dependent on the search's backing indices total amount of +<>, of all mapping depths. When this total +amount is too high or is exponentially climbing, we refer to it as +experiencing mapping explosion. Field counts going this high are uncommon +and usually suggest an upstream document formatting issue as +link:https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion[shown in this blog]. Mapping explosion may surface as the following performance symptoms: -* <> reporting high heap or CPU on the main node -and/or nodes hosting the indices shards. This may potentially +* <> reporting high heap or CPU on the main node +and/or nodes hosting the indices shards. This may potentially escalate to temporary node unresponsiveness and/or main overwhelm. -* <> reporting long search durations only related to -this index or indices, even on simple searches. +* <> reporting long search durations only related to +this index or indices, even on simple searches. -* <> reporting long index durations only related to -this index or indices. This usually relates to <> -reporting that the coordinating node is waiting for all other nodes to +* <> reporting long index durations only related to +this index or indices. This usually relates to <> +reporting that the coordinating node is waiting for all other nodes to confirm they are on mapping update request. 
-* Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or +* Discover's **Fields for wildcard** page-loading API command or {kibana-ref}/console-kibana.html[Dev Tools] page-refreshing Autocomplete API commands are taking a long time (more than 10 seconds) or timing out in the browser's Developer Tools Network tab. * Discover's **Available fields** taking a long time to compile Javascript in the browser's Developer Tools Performance tab. This may potentially escalate to temporary browser page unresponsiveness. -* Kibana's {kibana-ref}/alerting-getting-started.html[alerting] or {security-guide}/detection-engine-overview.html[security rules] may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {kib}'s {kibana-ref}/settings.html#server-maxPayload[`server-maxPayload`]. +* Kibana's {kibana-ref}/alerting-getting-started.html[alerting] or {security-guide}/detection-engine-overview.html[security rules] may error `The content length (X) is bigger than the maximum allowed string (Y)` where `X` is attempted payload and `Y` is {kib}'s {kibana-ref}/settings.html#server-maxPayload[`server-maxPayload`]. -* Long {es} start-up durations. +* Long {es} start-up durations. [discrete] [[prevent]] ==== Prevent or prepare -<> cannot be field-reduced once initialized. -{es} indices default to <> which -doesn't normally cause problems unless it's combined with overriding -<>. The -default `1000` limit is considered generous, though overriding to `10000` -doesn't cause noticable impact depending on use case. However, to give -a bad example, overriding to `100000` and this limit being hit -by mapping totals would usually have strong performance implications. 
- -If your index mapped fields expect to contain a large, arbitrary set of -keys, you may instead consider: - -* Using the <> data type. Please note, -however, that flattened objects is link:https://github.com/elastic/kibana/issues/25820[not fully supported in {kib}] yet. For example, this could apply to sub-mappings like { `host.name` , -`host.os`, `host.version` }. Desired fields are still accessed by +<> cannot be field-reduced once initialized. +{es} indices default to <> which +doesn't normally cause problems unless it's combined with overriding +<>. The +default `1000` limit is considered generous, though overriding to `10000` +doesn't cause noticable impact depending on use case. However, to give +a bad example, overriding to `100000` and this limit being hit +by mapping totals would usually have strong performance implications. + +If your index mapped fields expect to contain a large, arbitrary set of +keys, you may instead consider: + +* Using the <> data type. Please note, +however, that flattened objects is link:https://github.com/elastic/kibana/issues/25820[not fully supported in {kib}] yet. For example, this could apply to sub-mappings like { `host.name` , +`host.os`, `host.version` }. Desired fields are still accessed by <>. -* Using the <>. This is helpful when you're -interested in storing but not searching a group of fields. This is commonly -used for unknown upstream scenarios which may induce however many fields. -For example, this is recommended when sub-mappings start showing new, -unexpected fields like { `o365.a01`, `o365.a02`, `o365.b01`, `o365.c99`}. - -* Setting <> to disable a particular field's -searchability. This cannot effect current index mapping, but can apply -going forward via an <>. +* Disable <>. +This cannot effect current index mapping, but can apply going forward via an <>. -Modifying to the <> data type would not resolve the core -issue. +Modifying to the <> data type would not resolve the core +issue. 
[discrete] [[check]] @@ -91,12 +84,12 @@ You can use <> to find fields which [[complex]] ==== Complex explosions -Mapping explosions also covers when an individual index field totals are within limits but combined indices fields totals are very high. It's very common for symptoms to first be noticed on a {kibana-ref}/data-views.html[data view] and be traced back to an individual index or a subset of indices via the +Mapping explosions also covers when an individual index field totals are within limits but combined indices fields totals are very high. It's very common for symptoms to first be noticed on a {kibana-ref}/data-views.html[data view] and be traced back to an individual index or a subset of indices via the <>. -However, though less common, it is possible to only experience mapping explosions on the combination of backing indices. For example, if a <>'s backing indices are all at field total limit but each contain unique fields from one another. +However, though less common, it is possible to only experience mapping explosions on the combination of backing indices. For example, if a <>'s backing indices are all at field total limit but each contain unique fields from one another. -This situation most easily surfaces by adding a {kibana-ref}/data-views.html[data view] and checking its **Fields** tab for its total fields count. This statistic does tells you overall fields and not only where <>, but serves as a good baseline. +This situation most easily surfaces by adding a {kibana-ref}/data-views.html[data view] and checking its **Fields** tab for its total fields count. This statistic does tells you overall fields and not only where <>, but serves as a good baseline. If your issue only surfaces via a {kibana-ref}/data-views.html[data view], you may consider this menu's **Field filters** if you're not using <>. Alternatively, you may consider a more targeted index pattern or using a negative pattern to filter-out problematic indices. 
For example, if `logs-*` has too high a field count because of problematic backing indices `logs-lotsOfFields-*`, then you could update to either `logs-*,-logs-lotsOfFields-*` or `logs-iMeantThisAnyway-*`. @@ -109,12 +102,12 @@ Mapping explosion is not easily resolved, so it is better prevented via the abov * Disable <>. -* <> into an index with a corrected mapping, +* <> into an index with a corrected mapping, either via <> or <>. * If index is unneeded and/or historical, consider <>. -* {logstash-ref}/plugins-inputs-elasticsearch.html[Export] and {logstash-ref}/plugins-outputs-elasticsearch.html[re-import] data into a mapping-corrected index after {logstash-ref}/plugins-filters-prune.html[pruning] +* {logstash-ref}/plugins-inputs-elasticsearch.html[Export] and {logstash-ref}/plugins-outputs-elasticsearch.html[re-import] data into a mapping-corrected index after {logstash-ref}/plugins-filters-prune.html[pruning] problematic fields via Logstash. -<> would not resolve the core issue. +<> would not resolve the core issue. 
diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c6104e92b0b3e..6baa2ddf6c972 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -131,8 +131,8 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.synonyms.SynonymsManagementAPIService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.tartarus.snowball.ext.DutchStemmer; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java index 9a915d0c056d5..af7386ba9b629 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.io.IOException; import java.util.Collections; diff --git 
a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java index c35127e1f0793..dab356960ee2f 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java @@ -28,10 +28,10 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.util.Collections; diff --git a/modules/apm/build.gradle b/modules/apm/build.gradle index 06a942d38bcd6..c8619c97d1068 100644 --- a/modules/apm/build.gradle +++ b/modules/apm/build.gradle @@ -9,7 +9,7 @@ apply plugin: 'elasticsearch.internal-es-plugin' esplugin { name 'apm' description 'Provides APM integration for Elasticsearch' - classname 'org.elasticsearch.tracing.apm.APM' + classname 'org.elasticsearch.telemetry.apm.APM' } def otelVersion = '1.17.0' diff --git a/modules/apm/src/main/java/module-info.java b/modules/apm/src/main/java/module-info.java index f0a89bb58afe1..0bea3078f5f00 100644 --- a/modules/apm/src/main/java/module-info.java +++ b/modules/apm/src/main/java/module-info.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -module org.elasticsearch.tracing.apm { +module org.elasticsearch.telemetry.apm { requires org.elasticsearch.base; requires org.elasticsearch.server; requires org.elasticsearch.xcontent; @@ -15,5 +15,5 @@ requires io.opentelemetry.context; requires io.opentelemetry.api; - exports org.elasticsearch.tracing.apm; + exports org.elasticsearch.telemetry.apm; } diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java similarity index 93% rename from modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java rename to modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java index f31772087e82f..1208fa1f7b9e5 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APM.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APM.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm; import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.internal.Client; @@ -24,8 +24,10 @@ import org.elasticsearch.plugins.TracerPlugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.apm.settings.APMAgentSettings; +import org.elasticsearch.telemetry.apm.tracing.APMTracer; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -35,7 +37,7 @@ /** * This module integrates Elastic's APM product with Elasticsearch. Elasticsearch has - * a {@link org.elasticsearch.tracing.Tracer} interface, which this module implements via + * a {@link org.elasticsearch.telemetry.tracing.Tracer} interface, which this module implements via * {@link APMTracer}. 
We use the OpenTelemetry API to capture "spans", and attach the * Elastic APM Java to ship those spans to an APM server. Although it is possible to * programmatically attach the agent, the Security Manager permissions required for this diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettings.java similarity index 84% rename from modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java rename to modules/apm/src/main/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettings.java index 3c75fe2e94c92..49317ab36543d 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMAgentSettings.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettings.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm.settings; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.telemetry.apm.tracing.APMTracer; import java.security.AccessController; import java.security.PrivilegedAction; @@ -31,7 +32,7 @@ * This class is responsible for APM settings, both for Elasticsearch and the APM Java agent. * The methods could all be static, however they are not in order to make unit testing easier. 
*/ -class APMAgentSettings { +public class APMAgentSettings { private static final Logger LOGGER = LogManager.getLogger(APMAgentSettings.class); @@ -41,7 +42,7 @@ class APMAgentSettings { */ static Map APM_AGENT_DEFAULT_SETTINGS = Map.of("transaction_sample_rate", "0.2"); - void addClusterSettingsListeners(ClusterService clusterService, APMTracer apmTracer) { + public void addClusterSettingsListeners(ClusterService clusterService, APMTracer apmTracer) { final ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(APM_ENABLED_SETTING, enabled -> { apmTracer.setEnabled(enabled); @@ -59,7 +60,7 @@ void addClusterSettingsListeners(ClusterService clusterService, APMTracer apmTra * Copies APM settings from the provided settings object into the corresponding system properties. * @param settings the settings to apply */ - void syncAgentSystemProperties(Settings settings) { + public void syncAgentSystemProperties(Settings settings) { this.setAgentSetting("recording", Boolean.toString(APM_ENABLED_SETTING.get(settings))); // Apply default values for some system properties. Although we configure @@ -81,7 +82,7 @@ void syncAgentSystemProperties(Settings settings) { * @param value the value to set, or null */ @SuppressForbidden(reason = "Need to be able to manipulate APM agent-related properties to set them dynamically") - void setAgentSetting(String key, String value) { + public void setAgentSetting(String key, String value) { final String completeKey = "elastic.apm." 
+ Objects.requireNonNull(key); AccessController.doPrivileged((PrivilegedAction) () -> { if (value == null || value.isEmpty()) { @@ -107,7 +108,7 @@ void setAgentSetting(String key, String value) { "recording" ); - static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting( + public static final Setting.AffixSetting APM_AGENT_SETTINGS = Setting.prefixKeySetting( APM_SETTING_PREFIX + "agent.", (qualifiedKey) -> { final String[] parts = qualifiedKey.split("\\."); @@ -122,19 +123,19 @@ void setAgentSetting(String key, String value) { } ); - static final Setting> APM_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting( + public static final Setting> APM_TRACING_NAMES_INCLUDE_SETTING = Setting.stringListSetting( APM_SETTING_PREFIX + "names.include", OperatorDynamic, NodeScope ); - static final Setting> APM_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( + public static final Setting> APM_TRACING_NAMES_EXCLUDE_SETTING = Setting.stringListSetting( APM_SETTING_PREFIX + "names.exclude", OperatorDynamic, NodeScope ); - static final Setting> APM_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting( + public static final Setting> APM_TRACING_SANITIZE_FIELD_NAMES = Setting.stringListSetting( APM_SETTING_PREFIX + "sanitize_field_names", List.of( "password", @@ -154,14 +155,17 @@ void setAgentSetting(String key, String value) { NodeScope ); - static final Setting APM_ENABLED_SETTING = Setting.boolSetting( + public static final Setting APM_ENABLED_SETTING = Setting.boolSetting( APM_SETTING_PREFIX + "enabled", false, OperatorDynamic, NodeScope ); - static final Setting APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(APM_SETTING_PREFIX + "secret_token", null); + public static final Setting APM_SECRET_TOKEN_SETTING = SecureSetting.secureString( + APM_SETTING_PREFIX + "secret_token", + null + ); - static final Setting APM_API_KEY_SETTING = SecureSetting.secureString(APM_SETTING_PREFIX + "api_key", null); + public static final Setting 
APM_API_KEY_SETTING = SecureSetting.secureString(APM_SETTING_PREFIX + "api_key", null); } diff --git a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/tracing/APMTracer.java similarity index 93% rename from modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java rename to modules/apm/src/main/java/org/elasticsearch/telemetry/apm/tracing/APMTracer.java index 09eff0c820745..fc390f1c3d603 100644 --- a/modules/apm/src/main/java/org/elasticsearch/tracing/apm/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/tracing/APMTracer.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm.tracing; import io.opentelemetry.api.GlobalOpenTelemetry; import io.opentelemetry.api.OpenTelemetry; @@ -34,7 +34,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.SpanId; import java.security.AccessController; import java.security.PrivilegedAction; @@ -43,20 +43,20 @@ import java.util.Map; import java.util.stream.Collectors; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_SANITIZE_FIELD_NAMES; +import static org.elasticsearch.telemetry.apm.settings.APMAgentSettings.APM_ENABLED_SETTING; +import static org.elasticsearch.telemetry.apm.settings.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; +import static org.elasticsearch.telemetry.apm.settings.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; +import static 
org.elasticsearch.telemetry.apm.settings.APMAgentSettings.APM_TRACING_SANITIZE_FIELD_NAMES; /** - * This is an implementation of the {@link org.elasticsearch.tracing.Tracer} interface, which uses + * This is an implementation of the {@link org.elasticsearch.telemetry.tracing.Tracer} interface, which uses * the OpenTelemetry API to capture spans. *

* This module doesn't provide an implementation of the OTel API. Normally that would mean that the * API's default, no-op implementation would be used. However, when the APM Java is attached, it * intercepts the {@link GlobalOpenTelemetry} class and provides its own implementation instead. */ -public class APMTracer extends AbstractLifecycleComponent implements org.elasticsearch.tracing.Tracer { +public class APMTracer extends AbstractLifecycleComponent implements org.elasticsearch.telemetry.tracing.Tracer { private static final Logger logger = LogManager.getLogger(APMTracer.class); @@ -98,7 +98,7 @@ public APMTracer(Settings settings) { this.enabled = APM_ENABLED_SETTING.get(settings); } - void setEnabled(boolean enabled) { + public void setEnabled(boolean enabled) { this.enabled = enabled; if (enabled) { this.services = createApmServices(); @@ -107,17 +107,17 @@ void setEnabled(boolean enabled) { } } - void setIncludeNames(List includeNames) { + public void setIncludeNames(List includeNames) { this.includeNames = includeNames; this.filterAutomaton = buildAutomaton(includeNames, excludeNames); } - void setExcludeNames(List excludeNames) { + public void setExcludeNames(List excludeNames) { this.excludeNames = excludeNames; this.filterAutomaton = buildAutomaton(includeNames, excludeNames); } - void setLabelFilters(List labelFilters) { + public void setLabelFilters(List labelFilters) { this.labelFilters = labelFilters; this.labelFilterAutomaton = buildAutomaton(labelFilters, List.of()); } @@ -324,8 +324,8 @@ private void setSpanAttributes(@Nullable Map spanAttributes, Spa spanBuilder.setSpanKind(SpanKind.INTERNAL); } - spanBuilder.setAttribute(org.elasticsearch.tracing.Tracer.AttributeKeys.NODE_NAME, nodeName); - spanBuilder.setAttribute(org.elasticsearch.tracing.Tracer.AttributeKeys.CLUSTER_NAME, clusterName); + spanBuilder.setAttribute(org.elasticsearch.telemetry.tracing.Tracer.AttributeKeys.NODE_NAME, nodeName); + 
spanBuilder.setAttribute(org.elasticsearch.telemetry.tracing.Tracer.AttributeKeys.CLUSTER_NAME, clusterName); } private void setSpanAttributes(ThreadContext threadContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { diff --git a/modules/apm/src/main/plugin-metadata/plugin-security.policy b/modules/apm/src/main/plugin-metadata/plugin-security.policy index f0032bb291789..b85d3ec05c277 100644 --- a/modules/apm/src/main/plugin-metadata/plugin-security.policy +++ b/modules/apm/src/main/plugin-metadata/plugin-security.policy @@ -26,4 +26,5 @@ grant codeBase "${codebase.elastic-apm-agent}" { permission java.lang.RuntimePermission "getClassLoader"; permission java.io.FilePermission "<>", "read,write"; permission org.elasticsearch.secure_sm.ThreadPermission "modifyArbitraryThreadGroup"; + permission java.net.NetPermission "getProxySelector"; }; diff --git a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettingsTests.java similarity index 81% rename from modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java rename to modules/apm/src/test/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettingsTests.java index 35328c5dd2461..3d95c9f85f5e5 100644 --- a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMAgentSettingsTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/settings/APMAgentSettingsTests.java @@ -6,13 +6,11 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm.settings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_AGENT_SETTINGS; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -24,7 +22,7 @@ public class APMAgentSettingsTests extends ESTestCase { */ public void test_whenTracerEnabled_setsRecordingProperty() { APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); apmAgentSettings.syncAgentSystemProperties(settings); verify(apmAgentSettings).setAgentSetting("recording", "true"); @@ -35,7 +33,7 @@ public void test_whenTracerEnabled_setsRecordingProperty() { */ public void test_whenTracerDisabled_setsRecordingProperty() { APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), false).build(); apmAgentSettings.syncAgentSystemProperties(settings); verify(apmAgentSettings).setAgentSetting("recording", "false"); @@ -47,7 +45,7 @@ public void test_whenTracerDisabled_setsRecordingProperty() { */ public void test_whenTracerCreated_defaultSettingsApplied() { APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); apmAgentSettings.syncAgentSystemProperties(settings); 
verify(apmAgentSettings).setAgentSetting("transaction_sample_rate", "0.2"); @@ -60,8 +58,8 @@ public void test_whenTracerCreated_defaultSettingsApplied() { public void test_whenTracerCreated_clusterSettingsOverrideDefaults() { APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .put(APM_AGENT_SETTINGS.getKey() + "transaction_sample_rate", "0.75") + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .put(APMAgentSettings.APM_AGENT_SETTINGS.getKey() + "transaction_sample_rate", "0.75") .build(); apmAgentSettings.syncAgentSystemProperties(settings); @@ -79,8 +77,8 @@ public void test_whenTracerCreated_clusterSettingsOverrideDefaults() { public void test_whenTracerCreated_clusterSettingsAlsoApplied() { APMAgentSettings apmAgentSettings = spy(new APMAgentSettings()); Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .put(APM_AGENT_SETTINGS.getKey() + "span_compression_enabled", "true") + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .put(APMAgentSettings.APM_AGENT_SETTINGS.getKey() + "span_compression_enabled", "true") .build(); apmAgentSettings.syncAgentSystemProperties(settings); diff --git a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/tracing/APMTracerTests.java similarity index 88% rename from modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java rename to modules/apm/src/test/java/org/elasticsearch/telemetry/apm/tracing/APMTracerTests.java index e9654229fbb77..ffe719197c59f 100644 --- a/modules/apm/src/test/java/org/elasticsearch/tracing/apm/APMTracerTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/tracing/APMTracerTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm.tracing; import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; @@ -21,8 +21,9 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.apm.settings.APMAgentSettings; +import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.tracing.SpanId; import java.time.Instant; import java.util.HashMap; @@ -31,9 +32,6 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_ENABLED_SETTING; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING; -import static org.elasticsearch.tracing.apm.APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.hasKey; @@ -54,7 +52,7 @@ public class APMTracerTests extends ESTestCase { * Check that the tracer doesn't create spans when tracing is disabled. 
*/ public void test_onTraceStarted_withTracingDisabled_doesNotStartTrace() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), false).build(); APMTracer apmTracer = buildTracer(settings); apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); @@ -67,8 +65,8 @@ public void test_onTraceStarted_withTracingDisabled_doesNotStartTrace() { */ public void test_onTraceStarted_withSpanNameOmitted_doesNotStartTrace() { Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), List.of("filtered*")) + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .putList(APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), List.of("filtered*")) .build(); APMTracer apmTracer = buildTracer(settings); @@ -81,7 +79,7 @@ public void test_onTraceStarted_withSpanNameOmitted_doesNotStartTrace() { * Check that when a trace is started, the tracer starts a span and records it. */ public void test_onTraceStarted_startsTrace() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); @@ -94,7 +92,7 @@ public void test_onTraceStarted_startsTrace() { * Checks that when a trace is started with a specific start time, the tracer starts a span and records it. 
*/ public void test_onTraceStartedWithStartTime_startsTrace() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); ThreadContext threadContext = new ThreadContext(settings); @@ -112,7 +110,7 @@ public void test_onTraceStartedWithStartTime_startsTrace() { * Check that when a trace is stopped, the tracer ends the span and removes the record of it. */ public void test_onTraceStopped_stopsTrace() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); @@ -129,7 +127,7 @@ public void test_onTraceStopped_stopsTrace() { * check that the local context object is added, however. 
*/ public void test_whenTraceStarted_threadContextIsPopulated() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), true).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); ThreadContext threadContext = new ThreadContext(settings); @@ -149,8 +147,8 @@ public void test_whenTraceStarted_andSpanNameIncluded_thenSpanIsStarted() { "name-b*" ); Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .putList(APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) .build(); APMTracer apmTracer = buildTracer(settings); @@ -171,9 +169,9 @@ public void test_whenTraceStarted_andSpanNameIncludedAndExcluded_thenSpanIsNotSt final List includePatterns = List.of("name-a*"); final List excludePatterns = List.of("name-a*"); Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .putList(APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) - .putList(APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .putList(APMAgentSettings.APM_TRACING_NAMES_INCLUDE_SETTING.getKey(), includePatterns) + .putList(APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) .build(); APMTracer apmTracer = buildTracer(settings); @@ -194,8 +192,8 @@ public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { "name-b*" ); Settings settings = Settings.builder() - .put(APM_ENABLED_SETTING.getKey(), true) - .putList(APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) + .put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true) + .putList(APMAgentSettings.APM_TRACING_NAMES_EXCLUDE_SETTING.getKey(), excludePatterns) .build(); APMTracer apmTracer = 
buildTracer(settings); @@ -212,7 +210,7 @@ public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { * Check that sensitive attributes are not added verbatim to a span, but instead the value is redacted. */ public void test_whenAddingAttributes_thenSensitiveValuesAreRedacted() { - Settings settings = Settings.builder().put(APM_ENABLED_SETTING.getKey(), false).build(); + Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), false).build(); APMTracer apmTracer = buildTracer(settings); CharacterRunAutomaton labelFilterAutomaton = apmTracer.getLabelFilterAutomaton(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 313a6dd459668..3ec77b848d168 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -70,8 +70,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -188,7 +188,8 @@ public Collection createComponents( getClock(), threadPool, threadPool::absoluteTimeInMillis, - errorStoreInitialisationService.get() + errorStoreInitialisationService.get(), + allocationService ) ); dataLifecycleInitialisationService.get().init(); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java index 75713c85df4a1..904b918fe5ae4 
100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportAction.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.Index; import org.elasticsearch.indices.SystemIndices; @@ -48,7 +49,6 @@ public class DeleteDataStreamTransportAction extends AcknowledgedTransportMaster private static final Logger LOGGER = LogManager.getLogger(DeleteDataStreamTransportAction.class); - private final MetadataDeleteIndexService deleteIndexService; private final SystemIndices systemIndices; @Inject @@ -58,7 +58,6 @@ public DeleteDataStreamTransportAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - MetadataDeleteIndexService deleteIndexService, SystemIndices systemIndices ) { super( @@ -71,7 +70,6 @@ public DeleteDataStreamTransportAction( indexNameExpressionResolver, ThreadPool.Names.SAME ); - this.deleteIndexService = deleteIndexService; this.systemIndices = systemIndices; } @@ -100,11 +98,11 @@ public void onFailure(Exception e) { @Override public ClusterState execute(ClusterState currentState) { return removeDataStream( - deleteIndexService, indexNameExpressionResolver, currentState, request, - ds -> systemIndices.validateDataStreamAccess(ds, threadPool.getThreadContext()) + ds -> systemIndices.validateDataStreamAccess(ds, threadPool.getThreadContext()), + clusterService.getSettings() ); } @@ -122,11 +120,11 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String } static ClusterState removeDataStream( - MetadataDeleteIndexService deleteIndexService, IndexNameExpressionResolver 
indexNameExpressionResolver, ClusterState currentState, DeleteDataStreamAction.Request request, - Consumer systemDataStreamAccessValidator + Consumer systemDataStreamAccessValidator, + Settings settings ) { List names = getDataStreamNames(indexNameExpressionResolver, currentState, request.getNames(), request.indicesOptions()); Set dataStreams = new HashSet<>(names); @@ -168,7 +166,7 @@ static ClusterState removeDataStream( metadata.removeDataStream(ds); } currentState = ClusterState.builder(currentState).metadata(metadata).build(); - return deleteIndexService.deleteIndices(currentState, backingIndicesToRemove); + return MetadataDeleteIndexService.deleteIndices(currentState, backingIndicesToRemove, settings); } @Override diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index 3311d064b4816..3f07e9ea478df 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; import org.elasticsearch.common.Priority; @@ -52,8 +53,8 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceBackingWithDownsampleIndexExecutor; -import org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask; +import 
org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleIndexExecutor; +import org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleToDS; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -80,7 +81,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.WRITE; import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.STARTED; -import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.SUCCESS; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_DOWNSAMPLE_STATUS; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; @@ -133,7 +133,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab private final ClusterService clusterService; private final ThreadPool threadPool; final ResultDeduplicator transportActionsDeduplicator; - final ResultDeduplicator clusterStateChangesDeduplicator; + final ResultDeduplicator clusterStateChangesDeduplicator; private final LongSupplier nowSupplier; private final Clock clock; private final DataStreamLifecycleErrorStore errorStore; @@ -143,7 +143,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab private SchedulerEngine.Job scheduledJob; private final SetOnce scheduler = new SetOnce<>(); private final MasterServiceTaskQueue forceMergeClusterStateUpdateTaskQueue; - private final MasterServiceTaskQueue swapSourceWithDownsampleIndexQueue; + private final MasterServiceTaskQueue swapSourceWithDownsampleIndexQueue; private volatile ByteSizeValue targetMergePolicyFloorSegment; private volatile int targetMergePolicyFactor; @@ -168,7 +168,8 @@ public DataStreamLifecycleService( Clock clock, ThreadPool threadPool, LongSupplier nowSupplier, - DataStreamLifecycleErrorStore errorStore + 
DataStreamLifecycleErrorStore errorStore, + AllocationService allocationService ) { this.settings = settings; this.client = client; @@ -192,8 +193,8 @@ public DataStreamLifecycleService( ); this.swapSourceWithDownsampleIndexQueue = clusterService.createTaskQueue( "data-stream-lifecycle-swap-source-with-downsample", - Priority.NORMAL, - new ReplaceBackingWithDownsampleIndexExecutor(client) + Priority.URGENT, // urgent priority as this deletes indices + new DeleteSourceAndAddDownsampleIndexExecutor(allocationService) ); } @@ -394,7 +395,6 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L } String indexName = index.getName(); - IndexMetadata.DownsampleTaskStatus backingIndexDownsamplingStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndexMeta.getSettings()); String downsamplingSourceIndex = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); // if the current index is not a downsample we want to mark the index as read-only before proceeding with downsampling @@ -402,21 +402,7 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L && state.blocks().indexBlocked(ClusterBlockLevel.WRITE, indexName) == false) { affectedIndices.add(index); addIndexBlockOnce(indexName); - } else if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) - && backingIndexDownsamplingStatus.equals(SUCCESS)) { - // if the backing index is a downsample index itself, let's check if its source index still exists as we must delete it - IndexMetadata downsampleSourceIndex = metadata.index(downsamplingSourceIndex); - if (downsampleSourceIndex != null) { - // we mark the backing index as affected as we don't want subsequent operations that might change its state to - // be performed, as we might lose the way to identify that we must delete its replacement source index - affectedIndices.add(index); - // delete downsampling source index (that's not part of the data stream anymore) before doing any more - // downsampling 
- deleteIndexOnce(downsamplingSourceIndex, "replacement with its downsampled index in the data stream"); - } - } - - if (affectedIndices.contains(index) == false) { + } else { // we're not performing any operation for this index which means that it: // - has matching downsample rounds // - is read-only @@ -582,7 +568,10 @@ private Set evaluateDownsampleStatus( */ private void replaceBackingIndexWithDownsampleIndexOnce(DataStream dataStream, String backingIndexName, String downsampleIndexName) { clusterStateChangesDeduplicator.executeOnce( - new ReplaceSourceWithDownsampleIndexTask(dataStream.getName(), backingIndexName, downsampleIndexName, null), + // we use a String key here as otherwise it's ... awkward as we have to create the DeleteSourceAndAddDownsampleToDS as the + // key _without_ a listener (passing in null) and then below we create it again with the `reqListener`. We're using a String + // as it seems to be clearer. + "dsl-replace-" + dataStream.getName() + "-" + backingIndexName + "-" + downsampleIndexName, new ErrorRecordingActionListener( backingIndexName, errorStore, @@ -601,8 +590,14 @@ private void replaceBackingIndexWithDownsampleIndexOnce(DataStream dataStream, S dataStream ); swapSourceWithDownsampleIndexQueue.submitTask( - "data-stream-lifecycle-replace-source[" + backingIndexName + "]-with-[" + downsampleIndexName + "]", - new ReplaceSourceWithDownsampleIndexTask(dataStream.getName(), backingIndexName, downsampleIndexName, reqListener), + "data-stream-lifecycle-delete-source[" + backingIndexName + "]-add-to-datastream-[" + downsampleIndexName + "]", + new DeleteSourceAndAddDownsampleToDS( + settings, + dataStream.getName(), + backingIndexName, + downsampleIndexName, + reqListener + ), null ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java 
b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java new file mode 100644 index 0000000000000..bf31146d711b2 --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutor.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.downsampling; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.SimpleBatchedExecutor; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.core.Tuple; + +import static org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionListener.rerouteCompletionIsNotRequired; + +/** + * Cluster service task (batched) executor that deletes the source index and adds its downsample index to the data stream. 
+ */ +public class DeleteSourceAndAddDownsampleIndexExecutor extends SimpleBatchedExecutor { + private static final Logger LOGGER = LogManager.getLogger(DeleteSourceAndAddDownsampleToDS.class); + private final AllocationService allocationService; + + public DeleteSourceAndAddDownsampleIndexExecutor(AllocationService allocationService) { + this.allocationService = allocationService; + } + + @Override + public Tuple executeTask(DeleteSourceAndAddDownsampleToDS task, ClusterState clusterState) throws Exception { + return Tuple.tuple(task.execute(clusterState), null); + } + + @Override + public void taskSucceeded(DeleteSourceAndAddDownsampleToDS task, Void unused) { + LOGGER.trace( + "Updated cluster state and replaced index [{}] with index [{}] in data stream [{}]. Index [{}] was deleted", + task.getSourceBackingIndex(), + task.getDownsampleIndex(), + task.getDataStreamName(), + task.getSourceBackingIndex() + ); + task.getListener().onResponse(null); + } + + @Override + public ClusterState afterBatchExecution(ClusterState clusterState, boolean clusterStateChanged) { + if (clusterStateChanged) { + return allocationService.reroute( + clusterState, + "deleted indices", + rerouteCompletionIsNotRequired() // it is not required to balance shard to report index deletion success + ); + } + return clusterState; + } +} diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java similarity index 65% rename from modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java rename to modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java index 70cf57456e099..3e49499740349 100644 --- 
a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDS.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -24,27 +25,31 @@ import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; /** - * Cluster state task that replaces a source index in a data stream with its downsample index. + * Cluster state task that deletes a source index in a data stream and adds its downsample index. * In the process it will configure the origination date for the downsample index (so it can * have a correct generation time). 
*/ -public class ReplaceSourceWithDownsampleIndexTask implements ClusterStateTaskListener { - private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); +public class DeleteSourceAndAddDownsampleToDS implements ClusterStateTaskListener { + private static final Logger LOGGER = LogManager.getLogger(DeleteSourceAndAddDownsampleToDS.class); + private final Settings settings; private ActionListener listener; private final String dataStreamName; private final String sourceBackingIndex; private final String downsampleIndex; - public ReplaceSourceWithDownsampleIndexTask( + public DeleteSourceAndAddDownsampleToDS( + Settings settings, String dataStreamName, String sourceBackingIndex, String downsampleIndex, ActionListener listener ) { + this.settings = settings; this.dataStreamName = dataStreamName; this.sourceBackingIndex = sourceBackingIndex; this.downsampleIndex = downsampleIndex; @@ -53,17 +58,16 @@ public ReplaceSourceWithDownsampleIndexTask( ClusterState execute(ClusterState state) { LOGGER.trace( - "Updating cluster state to replace index [{}] with [{}] in data stream [{}]", + "Updating cluster state to replace and delete index [{}] with [{}] in data stream [{}]", sourceBackingIndex, downsampleIndex, dataStreamName ); - IndexAbstraction sourceIndexAbstraction = state.metadata().getIndicesLookup().get(sourceBackingIndex); IndexMetadata downsampleIndexMeta = state.metadata().index(downsampleIndex); if (downsampleIndexMeta == null) { // the downsample index doesn't exist anymore so nothing to replace here LOGGER.trace( - "Received request replace index [{}] with [{}] in data stream [{}] but the replacement index [{}] doesn't exist." + "Received request to replace index [{}] with [{}] in data stream [{}] but the replacement index [{}] doesn't exist." 
+ "Nothing to do here.", sourceBackingIndex, downsampleIndex, @@ -72,9 +76,9 @@ ClusterState execute(ClusterState state) { ); return state; } - IndexMetadata sourceIndexMeta = state.metadata().index(sourceBackingIndex); - DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); + IndexAbstraction sourceIndexAbstraction = state.metadata().getIndicesLookup().get(sourceBackingIndex); if (sourceIndexAbstraction == null) { + DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); // index was deleted in the meantime, so let's check if we can make sure the downsample index ends up in the // data stream (if not already there) if (dataStream != null @@ -91,8 +95,24 @@ ClusterState execute(ClusterState state) { return ClusterState.builder(state).metadata(newMetaData).build(); } } else { - // the source index exists DataStream sourceParentDataStream = sourceIndexAbstraction.getParentDataStream(); + if (sourceParentDataStream != null && sourceParentDataStream.getWriteIndex().getName().equals(sourceBackingIndex)) { + String errorMessage = String.format( + Locale.ROOT, + "index [%s] is the write index for data stream [%s] and cannot be replaced", + sourceBackingIndex, + sourceParentDataStream.getName() + ); + throw new IllegalStateException(errorMessage); + } + + IndexMetadata sourceIndexMeta = state.metadata().index(sourceBackingIndex); + assert sourceIndexMeta != null + : "the source index abstraction exists in the indices lookup, so the index metadata must " + + "exist in the same cluster state metadata"; + // the source index exists so let's start by deleting it + state = MetadataDeleteIndexService.deleteIndices(state, Set.of(sourceIndexMeta.getIndex()), settings); + DataStream dataStream = state.metadata().dataStreams().get(dataStreamName); if (sourceParentDataStream != null) { assert sourceParentDataStream.getName().equals(dataStreamName) : "the backing index must be part of the provided data " @@ -101,49 +121,14 @@ ClusterState 
execute(ClusterState state) { + "] but it is instead part of data stream [" + sourceParentDataStream.getName() + "]"; - if (sourceParentDataStream.getWriteIndex().getName().equals(sourceBackingIndex)) { - String errorMessage = String.format( - Locale.ROOT, - "index [%s] is the write index for data stream [%s] and cannot be replaced", - sourceBackingIndex, - sourceParentDataStream.getName() - ); - throw new IllegalStateException(errorMessage); - } - if (sourceIndexMeta != null) { - // both indices exist, let's copy the origination date from the source index to the downsample index - Metadata.Builder newMetaData = Metadata.builder(state.getMetadata()); - TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta); - assert generationLifecycleDate != null : "write index must never be downsampled, or replaced"; - IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState( - sourceIndexMeta, - downsampleIndexMeta, - generationLifecycleDate.millis() - ); - - newMetaData.put(updatedDownsampleMetadata, true); - // replace source with downsample - newMetaData.put(dataStream.replaceBackingIndex(sourceIndexMeta.getIndex(), downsampleIndexMeta.getIndex())); - return ClusterState.builder(state).metadata(newMetaData).build(); - } + // both indices exist, let's copy the origination date from the source index to the downsample index + return addDownsampleIndexToDataStream(state, dataStream, sourceIndexMeta, downsampleIndexMeta); } else { // the source index is not part of a data stream, so let's check if we can make sure the downsample index ends up in the // data stream if (dataStream != null && dataStream.getIndices().stream().filter(index -> index.getName().equals(downsampleIndex)).findAny().isEmpty()) { - Metadata.Builder newMetaData = Metadata.builder(state.getMetadata()); - TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta); - assert generationLifecycleDate != null : "write index must never be 
downsampled, or replaced"; - - IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState( - sourceIndexMeta, - downsampleIndexMeta, - generationLifecycleDate.millis() - ); - newMetaData.put(updatedDownsampleMetadata, true); - // add downsample index to data stream - newMetaData.put(dataStream.addBackingIndex(state.metadata(), downsampleIndexMeta.getIndex())); - return ClusterState.builder(state).metadata(newMetaData).build(); + return addDownsampleIndexToDataStream(state, dataStream, sourceIndexMeta, downsampleIndexMeta); } } } @@ -151,6 +136,34 @@ ClusterState execute(ClusterState state) { return state; } + /** + * This creates a new {@link ClusterState} with an updated data stream that contains the provided downsample index. + * This method is private as it fits into the flow of this cluster state task - i.e. the source index has already been removed from + * the provided state. + */ + private static ClusterState addDownsampleIndexToDataStream( + ClusterState state, + DataStream dataStream, + IndexMetadata sourceIndexMeta, + IndexMetadata downsampleIndexMeta + ) { + Metadata.Builder newMetaData = Metadata.builder(state.getMetadata()); + TimeValue generationLifecycleDate = dataStream.getGenerationLifecycleDate(sourceIndexMeta); + // the generation lifecycle date is null only for the write index + // we fail already if attempting to delete/downsample the write index, so the following assertion just re-inforces that + assert generationLifecycleDate != null : "write index must never be downsampled, or replaced"; + IndexMetadata updatedDownsampleMetadata = copyDataStreamLifecycleState( + sourceIndexMeta, + downsampleIndexMeta, + generationLifecycleDate.millis() + ); + + newMetaData.put(updatedDownsampleMetadata, true); + // we deleted the source already so let's add the downsample index to the data stream + newMetaData.put(dataStream.addBackingIndex(state.metadata(), downsampleIndexMeta.getIndex())); + return 
ClusterState.builder(state).metadata(newMetaData).build(); + } + /** * Copies the data stream lifecycle state information from the source index to the destination. * This ensures the destination index will have a generation time by setting the {@link IndexSettings#LIFECYCLE_ORIGINATION_DATE} and @@ -215,7 +228,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - ReplaceSourceWithDownsampleIndexTask that = (ReplaceSourceWithDownsampleIndexTask) o; + DeleteSourceAndAddDownsampleToDS that = (DeleteSourceAndAddDownsampleToDS) o; return Objects.equals(dataStreamName, that.dataStreamName) && Objects.equals(sourceBackingIndex, that.sourceBackingIndex) && Objects.equals(downsampleIndex, that.downsampleIndex); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java deleted file mode 100644 index 625c1f71a92db..0000000000000 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle.downsampling; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.SimpleBatchedExecutor; -import org.elasticsearch.common.Strings; -import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.snapshots.SnapshotInProgressException; - -/** - * Cluster service task (batched) executor that executes the replacement of data stream backing index with its - * downsampled index. - * After the task is executed the executor issues a delete API call for the source index however, it doesn't - * hold up the task listener (nb we notify the listener before we call the delete API so we don't introduce - * weird partial failure scenarios - if the delete API fails the - * {@link org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService} will retry on the next run so the source index will get - * deleted) - */ -public class ReplaceBackingWithDownsampleIndexExecutor extends SimpleBatchedExecutor { - private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); - private final Client client; - - public ReplaceBackingWithDownsampleIndexExecutor(Client client) { - this.client = client; - } - - @Override - public Tuple executeTask(ReplaceSourceWithDownsampleIndexTask task, ClusterState clusterState) throws Exception { - return Tuple.tuple(task.execute(clusterState), null); - } - - @Override - public void taskSucceeded(ReplaceSourceWithDownsampleIndexTask task, Void unused) { - LOGGER.trace( - "Updated cluster state and replaced index [{}] with index [{}] in data stream 
[{}]", - task.getSourceBackingIndex(), - task.getDownsampleIndex(), - task.getDataStreamName() - ); - task.getListener().onResponse(null); - - LOGGER.trace( - "Issuing request to delete index [{}] as it's not part of data stream [{}] anymore", - task.getSourceBackingIndex(), - task.getDataStreamName() - ); - // chain an optimistic delete of the source index call here (if it fails it'll be retried by the data stream lifecycle loop) - client.admin().indices().delete(new DeleteIndexRequest(task.getSourceBackingIndex()), new ActionListener<>() { - @Override - public void onResponse(AcknowledgedResponse acknowledgedResponse) { - if (acknowledgedResponse.isAcknowledged()) { - LOGGER.info( - "Data stream lifecycle successfully deleted index [{}] due to being replaced by the downsampled index [{}] in" - + " data stream [{}]", - task.getSourceBackingIndex(), - task.getDownsampleIndex(), - task.getDataStreamName() - ); - } else { - LOGGER.trace( - "The delete request for index [{}] was not acknowledged. Data stream lifecycle service will retry on the" - + " next run if the index still exists", - task.getSourceBackingIndex() - ); - } - } - - @Override - public void onFailure(Exception e) { - if (e instanceof IndexNotFoundException) { - // index was already deleted, treat this as a success - LOGGER.trace("Did not delete index [{}] as it was already deleted", task.getSourceBackingIndex()); - return; - } - - if (e instanceof SnapshotInProgressException) { - LOGGER.info( - "Data stream lifecycle is unable to delete index [{}] because it's part of an ongoing snapshot. Retrying on " - + "the next data stream lifecycle run", - task.getSourceBackingIndex() - ); - } else { - LOGGER.error( - () -> Strings.format( - "Data stream lifecycle encountered an error trying to delete index [%s]. 
It will retry on its next run.", - task.getSourceBackingIndex() - ), - e - ); - } - } - }); - } -} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java index a9bb94658b890..e55ff022693b3 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamGetWriteIndexTests.java @@ -270,7 +270,6 @@ public void setup() throws Exception { MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService( clusterService, indicesService, - null, xContentRegistry() ); rolloverService = new MetadataRolloverService( diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java index 59aa4c17387c8..29c88b7f75463 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/action/DeleteDataStreamTransportActionTests.java @@ -14,8 +14,6 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; @@ -31,15 +29,11 @@ import java.util.Collections; import java.util.List; -import java.util.Set; import java.util.function.Consumer; import static 
org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class DeleteDataStreamTransportActionTests extends ESTestCase { @@ -53,7 +47,7 @@ public void testDeleteDataStream() { ClusterState cs = DataStreamTestHelper.getClusterStateWithDataStreams(List.of(new Tuple<>(dataStreamName, 2)), otherIndices); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(0)); assertThat(newState.metadata().indices().size(), equalTo(otherIndices.size())); for (String indexName : otherIndices) { @@ -74,7 +68,7 @@ public void testDeleteMultipleDataStreams() { ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { "ba*", "eggplant" }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState.metadata().dataStreams().size(), equalTo(1)); DataStream remainingDataStream = newState.metadata().dataStreams().get(dataStreamNames[0]); assertNotNull(remainingDataStream); @@ -100,7 +94,7 @@ public void testDeleteSnapshottingDataStream() { DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName }); SnapshotInProgressException e = expectThrows( SnapshotInProgressException.class, - () -> 
DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, snapshotCs, req, validator) + () -> DeleteDataStreamTransportAction.removeDataStream(iner, snapshotCs, req, validator, Settings.EMPTY) ); assertThat( @@ -146,16 +140,16 @@ public void testDeleteNonexistentDataStream() { expectThrows( ResourceNotFoundException.class, () -> DeleteDataStreamTransportAction.removeDataStream( - getMetadataDeleteIndexService(), iner, cs, new DeleteDataStreamAction.Request(new String[] { dataStreamName }), - validator + validator, + Settings.EMPTY ) ); DeleteDataStreamAction.Request req = new DeleteDataStreamAction.Request(new String[] { dataStreamName + "*" }); - ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(getMetadataDeleteIndexService(), iner, cs, req, validator); + ClusterState newState = DeleteDataStreamTransportAction.removeDataStream(iner, cs, req, validator, Settings.EMPTY); assertThat(newState, sameInstance(cs)); assertThat(newState.metadata().dataStreams().size(), equalTo(cs.metadata().dataStreams().size())); assertThat( @@ -164,22 +158,4 @@ public void testDeleteNonexistentDataStream() { ); } - @SuppressWarnings("unchecked") - private static MetadataDeleteIndexService getMetadataDeleteIndexService() { - MetadataDeleteIndexService s = mock(MetadataDeleteIndexService.class); - when(s.deleteIndices(any(ClusterState.class), any(Set.class))).thenAnswer(mockInvocation -> { - ClusterState currentState = (ClusterState) mockInvocation.getArguments()[0]; - Set indices = (Set) mockInvocation.getArguments()[1]; - - final Metadata.Builder b = Metadata.builder(currentState.metadata()); - for (Index index : indices) { - b.remove(index.getName()); - } - - return ClusterState.builder(currentState).metadata(b.build()).build(); - }); - - return s; - } - } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 5b73d94be578a..712ad07bc0634 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -29,6 +29,8 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.DataStream; @@ -44,6 +46,11 @@ import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.ClusterSettings; @@ -57,9 +64,11 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.snapshots.EmptySnapshotsInfoService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import 
org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportRequest; @@ -70,6 +79,7 @@ import java.time.Clock; import java.time.Instant; import java.time.ZoneId; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -96,7 +106,6 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -129,6 +138,18 @@ public void setupServices() { clientSeenRequests = new CopyOnWriteArrayList<>(); client = getTransportRequestsRecordingClient(); + AllocationService allocationService = new AllocationService( + new AllocationDeciders( + new HashSet<>( + Arrays.asList(new SameShardAllocationDecider(clusterSettings), new ReplicaAfterPrimaryActiveAllocationDecider()) + ) + ), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE, + TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY + ); dataStreamLifecycleService = new DataStreamLifecycleService( Settings.EMPTY, client, @@ -136,7 +157,8 @@ public void setupServices() { clock, threadPool, () -> now, - new DataStreamLifecycleErrorStore() + new DataStreamLifecycleErrorStore(), + allocationService ); clientDelegate = null; dataStreamLifecycleService.init(); @@ -1027,9 +1049,8 @@ public void testDownsampling() throws Exception { } // on this run, as downsampling is complete we expect to trigger the {@link - // 
org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask} - // cluster service task and replace the source index with the downsample index in the data stream - // we also expect a delete request for the source index to be witnessed + // org.elasticsearch.datastreams.lifecycle.downsampling.DeleteSourceAndAddDownsampleToDS} + // cluster service task and delete the source index whilst adding the downsample index in the data stream affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling(clusterService.state(), dataStream, List.of(firstGenIndex)); assertThat(affectedIndices, is(Set.of(firstGenIndex))); assertBusy(() -> { @@ -1038,51 +1059,13 @@ public void testDownsampling() throws Exception { // the downsample index must be part of the data stream assertThat(downsample.getParentDataStream(), is(notNullValue())); assertThat(downsample.getParentDataStream().getName(), is(dataStreamName)); - // the source index must not be part of the data stream + // the source index was deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndexName); - assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); - // {@link ReplaceBackingWithDownsampleIndexExecutor} triggers a delete reuqest for the backing index when the cluster state - // is successfully updated - assertThat(clientSeenRequests.size(), is(3)); - assertThat(clientSeenRequests.get(2), instanceOf(DeleteIndexRequest.class)); + // no further requests should be triggered + assertThat(clientSeenRequests.size(), is(2)); }, 30, TimeUnit.SECONDS); - - // NOTE from now on we need to refresh the state and dataStream variables as the data stream lifecycle service updated the - // cluster state in the cluster service via {@link ReplaceBackingWithDownsampleIndexExecutor} - dataStream = clusterService.state().metadata().dataStreams().get(dataStreamName); - state = 
clusterService.state(); - - // before we remove the backing index (to "implement" the above issued delete request) let's issue another data stream service - // donwsampling run as the service should detect that the index has not been deleted and issue a request itself - - // note that we call the downsampling with the downsampled index from now on, as IT is the one that's part of the datastream now - IndexMetadata downsampleMeta = clusterService.state().metadata().index(downsampleIndexName); - affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling( - clusterService.state(), - dataStream, - List.of(downsampleMeta.getIndex()) - ); - assertThat(affectedIndices, is(Set.of(downsampleMeta.getIndex()))); - assertThat(clientSeenRequests.size(), is(4)); - assertThat(clientSeenRequests.get(3), instanceOf(DeleteIndexRequest.class)); - - { - // let's remove the backing index (as delete was successful ... say) - Metadata.Builder metadataBuilder = Metadata.builder(state.metadata()); - metadataBuilder.remove(firstGenIndexName); - state = ClusterState.builder(state).metadata(metadataBuilder).build(); - setState(clusterService, state); - } - - // downsample was successful for this index, nothing else to have been executed here (still 4 witnessed reuqests as before) - affectedIndices = dataStreamLifecycleService.maybeExecuteDownsampling( - clusterService.state(), - dataStream, - List.of(downsampleMeta.getIndex()) - ); - assertThat(affectedIndices, is(empty())); - assertThat(clientSeenRequests.size(), is(4)); } public void testDownsamplingWhenTargetIndexNameClashYieldsException() throws Exception { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java new file mode 100644 index 0000000000000..e21fda0fe579b --- /dev/null +++ 
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleIndexExecutorTests.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.downsampling; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.elasticsearch.cluster.routing.allocation.allocator.AllocationActionListener.rerouteCompletionIsNotRequired; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class DeleteSourceAndAddDownsampleIndexExecutorTests extends ESTestCase { + + public void testExecutorNotifiesListenerAndReroutesAllocationService() { + String dataStreamName = randomAlphaOfLengthBetween(10, 100); + String sourceIndex = randomAlphaOfLengthBetween(10, 100); + String downsampleIndex = randomAlphaOfLengthBetween(10, 100); + + AllocationService allocationService = mock(AllocationService.class); + DeleteSourceAndAddDownsampleIndexExecutor executor = new DeleteSourceAndAddDownsampleIndexExecutor(allocationService); + + AtomicBoolean taskListenerCalled = new AtomicBoolean(false); + executor.taskSucceeded( + new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, sourceIndex, downsampleIndex, new ActionListener<>() { + @Override + public void onResponse(Void unused) { + 
taskListenerCalled.set(true); + } + + @Override + public void onFailure(Exception e) { + logger.error(e.getMessage(), e); + fail("unexpected exception: " + e.getMessage()); + } + }), + null + ); + assertThat(taskListenerCalled.get(), is(true)); + + ClusterState state = ClusterState.EMPTY_STATE; + executor.afterBatchExecution(state, true); + verify(allocationService).reroute(state, "deleted indices", rerouteCompletionIsNotRequired()); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java similarity index 78% rename from modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java rename to modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java index c3d1262c72dce..062d502ee7029 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/DeleteSourceAndAddDownsampleToDSTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.datastreams.lifecycle.downsampling; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -17,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.IndexVersion; @@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class ReplaceSourceWithDownsampleIndexTaskTests extends ESTestCase { +public class DeleteSourceAndAddDownsampleToDSTests extends ESTestCase { private long now; @@ -58,7 +58,8 @@ public void testDownsampleIndexMissingIsNoOp() { ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); String firstGeneration = DataStream.getDefaultBackingIndexName(dataStreamName, 1); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask( + ClusterState newState = new DeleteSourceAndAddDownsampleToDS( + Settings.EMPTY, dataStreamName, firstGeneration, "downsample-1s-" + firstGeneration, @@ -94,9 +95,8 @@ public void testDownsampleIsAddedToDSEvenIfSourceDeleted() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -129,7 +129,9 @@ public void testSourceIndexIsWriteIndexThrowsException() { IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> new ReplaceSourceWithDownsampleIndexTask(dataStreamName, writeIndex, downsampleIndex, null).execute(previousState) + () -> new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, writeIndex, downsampleIndex, null).execute( + previousState + ) ); assertThat( @@ -138,7 +140,7 @@ public void testSourceIndexIsWriteIndexThrowsException() { ); } 
- public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { + public void testSourceIsDeleteAndDownsampleOriginationDateIsConfigured() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; Metadata.Builder builder = Metadata.builder(); @@ -162,13 +164,14 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { // let's add some lifecycle custom metadata to the first generation index IndexMetadata indexMetadata = previousState.metadata().index(firstGenIndex); + RolloverInfo rolloverInfo = indexMetadata.getRolloverInfos().get(dataStreamName); + IndexMetadata.Builder firstGenBuilder = IndexMetadata.builder(indexMetadata) .putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, Map.of(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY, String.valueOf(now))); Metadata.Builder metaBuilder = Metadata.builder(previousState.metadata()).put(firstGenBuilder); previousState = ClusterState.builder(previousState).metadata(metaBuilder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -176,16 +179,11 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // the source index is NOT part of the data stream + // the source index is deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); - assertThat(sourceIndexAbstraction, is(notNullValue())); - 
assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); // let's check the downsample index has the origination date configured to the source index rollover time - IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); - RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); - assertThat(rolloverInfo, is(notNullValue())); - IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(rolloverInfo.getTime())); assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), notNullValue()); @@ -220,9 +218,8 @@ public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfi builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -230,16 +227,15 @@ public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfi // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // the source index is NOT part of the data stream + // the source index was deleted IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); - assertThat(sourceIndexAbstraction, is(notNullValue())); - assertThat(sourceIndexAbstraction.getParentDataStream(), 
is(nullValue())); + assertThat(sourceIndexAbstraction, is(nullValue())); IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(downsampleOriginationDate)); } - public void testSourceIndexIsNotPartOfDSAnymore() { + public void testSourceIndexIsDeleteEvenIfNotPartOfDSAnymore() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; Metadata.Builder builder = Metadata.builder(); @@ -263,9 +259,8 @@ public void testSourceIndexIsNotPartOfDSAnymore() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); - ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( - previousState - ); + ClusterState newState = new DeleteSourceAndAddDownsampleToDS(Settings.EMPTY, dataStreamName, firstGenIndex, downsampleIndex, null) + .execute(previousState); IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); assertThat(downsampleIndexAbstraction, is(notNullValue())); @@ -273,43 +268,6 @@ public void testSourceIndexIsNotPartOfDSAnymore() { // the downsample index is part of the data stream assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); - // origination date and the lifecycle metadata is configured even if the source index is not part of the data stream anymore - IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); - RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); - assertThat(rolloverInfo, is(notNullValue())); - } - - public void testListenersIsNonConsideredInEquals() { - // the task is used as a key in a result deduplicator ({@link ResultDeduplicator}) map and the listener must not - // be taken into account - - String dataStreamName = 
randomAlphaOfLengthBetween(10, 100); - String sourceBackingIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - ReplaceSourceWithDownsampleIndexTask withoutListener = new ReplaceSourceWithDownsampleIndexTask( - dataStreamName, - sourceBackingIndex, - downsampleIndex, - null - ); - - ReplaceSourceWithDownsampleIndexTask withListener = new ReplaceSourceWithDownsampleIndexTask( - dataStreamName, - sourceBackingIndex, - downsampleIndex, - new ActionListener<>() { - @Override - public void onResponse(Void unused) { - - } - - @Override - public void onFailure(Exception e) { - - } - } - ); - - assertThat(withoutListener.equals(withListener), is(true)); + assertThat(newState.metadata().getIndicesLookup().get(firstGenIndex), is(nullValue())); } } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java deleted file mode 100644 index ba501a17bbcf4..0000000000000 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutorTests.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.datastreams.lifecycle.downsampling; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionType; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; -import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; -import org.elasticsearch.client.internal.Client; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.client.NoOpClient; - -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.is; - -public class ReplaceBackingWithDownsampleIndexExecutorTests extends ESTestCase { - - public void testExecutorDeletesTheSourceIndexWhenTaskSucceeds() { - String dataStreamName = randomAlphaOfLengthBetween(10, 100); - String sourceIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - - try (Client client = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - assertThat(action.name(), is(DeleteIndexAction.NAME)); - assertTrue(request instanceof DeleteIndexRequest); - DeleteIndexRequest deleteIndexRequest = (DeleteIndexRequest) request; - assertThat(deleteIndexRequest.indices().length, is(1)); - assertThat(deleteIndexRequest.indices()[0], is(sourceIndex)); - } - }) { - ReplaceBackingWithDownsampleIndexExecutor executor = new ReplaceBackingWithDownsampleIndexExecutor(client); - - AtomicBoolean taskListenerCalled = new AtomicBoolean(false); - executor.taskSucceeded( - new ReplaceSourceWithDownsampleIndexTask(dataStreamName, sourceIndex, downsampleIndex, new ActionListener() { - @Override - public void onResponse(Void unused) { - taskListenerCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - logger.error(e.getMessage(), e); - fail("unexpected exception: " + 
e.getMessage()); - } - }), - null - ); - assertThat(taskListenerCalled.get(), is(true)); - } - } - - public void testExecutorCallsTaskListenerEvenIfDeteleFails() { - String dataStreamName = randomAlphaOfLengthBetween(10, 100); - String sourceIndex = randomAlphaOfLengthBetween(10, 100); - String downsampleIndex = randomAlphaOfLengthBetween(10, 100); - - try (Client client = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - listener.onFailure(new IllegalStateException("simulating a failure to delete index " + sourceIndex)); - } - }) { - ReplaceBackingWithDownsampleIndexExecutor executor = new ReplaceBackingWithDownsampleIndexExecutor(client); - - AtomicBoolean taskListenerCalled = new AtomicBoolean(false); - executor.taskSucceeded( - new ReplaceSourceWithDownsampleIndexTask(dataStreamName, sourceIndex, downsampleIndex, new ActionListener() { - @Override - public void onResponse(Void unused) { - taskListenerCalled.set(true); - } - - @Override - public void onFailure(Exception e) { - logger.error(e.getMessage(), e); - fail("unexpected exception: " + e.getMessage()); - } - }), - null - ); - assertThat(taskListenerCalled.get(), is(true)); - } - } -} diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml index f254911cef5fe..538e362ed9ec0 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/240_logs_ecs_mappings.yml @@ -21,6 +21,9 @@ setup: --- Test Elastic Agent log ECS mappings: + - skip: + version: all + reason: https://github.com/elastic/elasticsearch/issues/97795 - do: indices.get_data_stream: name: logs-generic-default diff --git 
a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index 03607b908d375..fd40643281da2 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -48,8 +48,8 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java index 02c17977daf6a..e7d6d127174ec 100644 --- a/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java +++ b/modules/lang-expression/src/internalClusterTest/java/org/elasticsearch/script/expression/MoreExpressionIT.java @@ -502,7 +502,6 @@ public void testSpecialValueVariable() throws Exception { assertThat(stats.getAvg(), equalTo(3.0)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99156") public void testStringSpecialValueVariable() throws Exception { // i.e. 
expression script for term aggregations, which is not allowed assertAcked(indicesAdmin().prepareCreate("test").setMapping("text", "type=keyword").get()); diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java index ec9435d9386b5..df08c0908e182 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionAggregationScript.java @@ -83,7 +83,7 @@ public void setNextAggregationValue(Object value) { // _value isn't used in script if specialValue == null if (specialValue != null) { if (value instanceof Number) { - specialValue.setValue(((Number) value).doubleValue()); + specialValue.setValue(leaf, ((Number) value).doubleValue()); } else { throw new GeneralScriptException("Cannot use expression with text variable using " + exprScript); } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstDoubleValueSource.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstDoubleValueSource.java index 50a70fccdcd44..903ddaf72340e 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstDoubleValueSource.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ReplaceableConstDoubleValueSource.java @@ -15,20 +15,21 @@ import org.apache.lucene.search.IndexSearcher; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; /** * A {@link DoubleValuesSource} which has a stub {@link DoubleValues} that holds a dynamically replaceable constant double. 
*/ final class ReplaceableConstDoubleValueSource extends DoubleValuesSource { - final ReplaceableConstDoubleValues fv; - ReplaceableConstDoubleValueSource() { - fv = new ReplaceableConstDoubleValues(); - } + private final Map specialValues = new ConcurrentHashMap<>(); @Override public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException { - return fv; + ReplaceableConstDoubleValues replaceableConstDoubleValues = new ReplaceableConstDoubleValues(); + specialValues.put(ctx, replaceableConstDoubleValues); + return replaceableConstDoubleValues; } @Override @@ -38,8 +39,12 @@ public boolean needsScores() { @Override public Explanation explain(LeafReaderContext ctx, int docId, Explanation scoreExplanation) throws IOException { - if (fv.advanceExact(docId)) return Explanation.match((float) fv.doubleValue(), "ReplaceableConstDoubleValues"); - else return Explanation.noMatch("ReplaceableConstDoubleValues"); + // TODO where is this explain called? I bet it's never tested, and probably never called. 
+ ReplaceableConstDoubleValues fv = specialValues.get(ctx); + if (fv.advanceExact(docId)) { + return Explanation.match((float) fv.doubleValue(), "ReplaceableConstDoubleValues"); + } + return Explanation.noMatch("ReplaceableConstDoubleValues"); } @Override @@ -52,7 +57,9 @@ public int hashCode() { return System.identityHashCode(this); } - public void setValue(double v) { + public void setValue(LeafReaderContext ctx, double v) { + ReplaceableConstDoubleValues fv = specialValues.get(ctx); + assert fv != null : "getValues must be called before setValue for any given leaf reader context"; fv.setValue(v); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index f5b27a48960d2..34e5ed3a5e5b7 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -43,8 +43,8 @@ import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java index fdf672f3cfbf4..569c510788b7f 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/ReindexPlugin.java @@ -35,8 +35,8 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java index 72546d98d1f16..3be475a9483bf 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ReindexFromRemoteWithAuthTests.java @@ -45,9 +45,9 @@ import org.elasticsearch.rest.root.MainRestPlugin; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index dd70e4778dd75..cc57692f7ba0c 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -30,10 +30,10 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index 3b5f1928871fe..fad293030cbd2 100644 --- a/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/modules/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -14,10 +14,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.junit.After; import org.junit.Before; diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 6968e307a403b..120d421fdf48c 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -31,8 +31,8 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import 
org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java index 9f157e2d291cd..78044ae35c5a0 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java @@ -28,8 +28,8 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.url.URLRepository; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java index 76cf25266379d..2dc26f665633c 100644 --- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/RuntimeFieldsCommonPlugin.java @@ -22,8 +22,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java 
b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index 37109bcf01f06..db119761641aa 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -26,9 +26,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java index a6e5f806babd2..3b7318c8d20f2 100644 --- a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java @@ -12,11 +12,11 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.hamcrest.OptionalMatchers; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.io.IOException; import java.util.Optional; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 208b0bf62fb6d..8eb1a5789102c 100644 --- 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -55,8 +55,8 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.netty4.internal.HttpHeadersAuthenticatorUtils; import org.elasticsearch.http.netty4.internal.HttpValidator; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.AcceptChannelHandler; import org.elasticsearch.transport.netty4.NetUtils; import org.elasticsearch.transport.netty4.Netty4Utils; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java index 9d818c12e6da5..2934d425709f2 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Plugin.java @@ -26,8 +26,8 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java index 02f2fab86f9c8..0b00c7d9e8ff8 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java +++ 
b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4BadRequestTests.java @@ -23,10 +23,10 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; import org.junit.After; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 1b6572ee0c24f..a31e2c36062f7 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -25,10 +25,10 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; import org.junit.After; diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java index 452a006d26d4f..b0a42470bdb22 100644 --- 
a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerTransportTests.java @@ -68,10 +68,10 @@ import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.NettyAllocator; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; diff --git a/qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java b/qa/apm/src/test/java/org/elasticsearch/telemetry/apm/ApmIT.java similarity index 99% rename from qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java rename to qa/apm/src/test/java/org/elasticsearch/telemetry/apm/ApmIT.java index 41421a4485267..021d9f8d01bf3 100644 --- a/qa/apm/src/test/java/org/elasticsearch/tracing/apm/ApmIT.java +++ b/qa/apm/src/test/java/org/elasticsearch/telemetry/apm/ApmIT.java @@ -6,7 +6,7 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.tracing.apm; +package org.elasticsearch.telemetry.apm; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; diff --git a/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java b/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java index 504802d5cfd7c..37c79fe2abb0b 100644 --- a/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java +++ b/qa/custom-rest-controller/src/javaRestTest/java/co/elastic/elasticsearch/test/CustomRestPlugin.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.usage.UsageService; import java.util.function.UnaryOperator; diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java index eda3c337c98f4..58bf1760551d4 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilBootstrapChecksTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.bootstrap; import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.node.NodeValidationException; import org.elasticsearch.test.AbstractBootstrapCheckTestCase; @@ -49,7 +50,17 @@ public void tearDown() throws Exception { public void testEnforceBootstrapChecks() throws NodeValidationException { setEsEnforceBootstrapChecks("true"); - final List checks = Collections.singletonList(context -> BootstrapCheck.BootstrapCheckResult.failure("error")); + final List checks = 
Collections.singletonList(new BootstrapCheck() { + @Override + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheck.BootstrapCheckResult.failure("error"); + } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } + }); final Logger logger = mock(Logger.class); diff --git a/qa/rolling-upgrade-legacy/build.gradle b/qa/rolling-upgrade-legacy/build.gradle new file mode 100644 index 0000000000000..7aca34bef8a1b --- /dev/null +++ b/qa/rolling-upgrade-legacy/build.gradle @@ -0,0 +1,143 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + + +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.BwcVersions +import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask + +apply plugin: 'elasticsearch.internal-testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.bwc-test' +apply plugin: 'elasticsearch.rest-resources' + +BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> + /* + * NOTE: This module is for the tests that were problematic when converting :qa:rolling-upgrade to the junit-based bwc test definition + * Over time, these should be migrated into the :qa:rolling-upgrade module and fixed properly + * + * The goal here is to: + *
    + *
  • start three nodes on the old version + *
  • run tests with systemProperty 'tests.rest.suite', 'old_cluster' + *
  • upgrade one node + *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' + *
  • upgrade one more node + *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again + *
  • upgrade the last node + *
  • run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster' + *
+ */ + + def baseCluster = testClusters.register(baseName) { + versions = [bwcVersion.toString(), project.version] + numberOfNodes = 3 + + setting 'repositories.url.allowed_urls', 'http://snapshot.test*' + setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" + setting 'xpack.security.enabled', 'false' + setting 'logger.org.elasticsearch.cluster.service.MasterService', 'TRACE' + setting 'logger.org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator', 'TRACE' + setting 'logger.org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders', 'TRACE' + requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") + } + + String oldVersion = bwcVersion.toString() + + tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { + dependsOn "processTestResources" + useCluster baseCluster + mustRunAfter("precommit") + doFirst { + delete("${buildDir}/cluster/shared/repo/${baseName}") + } + def excludeList = [] + systemProperty 'tests.rest.suite', 'old_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion + nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.clustername', baseName) + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["old_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["old_cluster/30_vector_search/Create indexed byte vectors and search"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } + } + + tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { + dependsOn "${baseName}#oldClusterTest" + useCluster baseCluster + doFirst { + baseCluster.get().nextNodeToNextVersion() + } + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion + systemProperty 'tests.first_round', 
'true' + nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } + } + + tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { + dependsOn "${baseName}#oneThirdUpgradedTest" + useCluster baseCluster + doFirst { + baseCluster.get().nextNodeToNextVersion() + } + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion + systemProperty 'tests.first_round', 'false' + nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } + } + + tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { + dependsOn "${baseName}#twoThirdsUpgradedTest" + doFirst { + baseCluster.get().nextNodeToNextVersion() + } + useCluster testClusters.named(baseName) + systemProperty 'tests.rest.suite', 'upgraded_cluster' + systemProperty 'tests.upgrade_from_version', oldVersion + nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) + 
nonInputProperties.systemProperty('tests.clustername', baseName) + def excludeList = [] + if (bwcVersion.before("8.4.0")) { + excludeList.addAll(["upgraded_cluster/30_vector_search/*"]) + } else if (bwcVersion.before("8.6.0")) { + excludeList.addAll(["upgraded_cluster/30_vector_search/Search byte indices created in old cluster"]) + } + if (excludeList.isEmpty() == false) { + systemProperty 'tests.rest.blacklist', excludeList.join(',') + } + } + + tasks.register(bwcTaskName(bwcVersion)) { + dependsOn tasks.named("${baseName}#upgradedClusterTest") + } +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java similarity index 79% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java rename to qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 008a718be5873..74a8eb7fd1988 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -9,11 +9,8 @@ import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.rest.ESRestTestCase; -import static org.hamcrest.Matchers.lessThan; - public abstract class AbstractRollingTestCase extends ESRestTestCase { protected enum ClusterType { OLD, @@ -34,16 +31,6 @@ public static ClusterType parse(String value) { protected static final boolean FIRST_MIXED_ROUND = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); - protected static IndexVersion getOldClusterIndexVersion() { - var version = UPGRADE_FROM_VERSION; - if 
(version.equals(org.elasticsearch.Version.CURRENT)) { - return IndexVersion.current(); - } else { - assertThat("Index version needs to be added to rolling test parameters", version, lessThan(org.elasticsearch.Version.V_8_11_0)); - return IndexVersion.fromId(version.id); - } - } - @Override protected final boolean resetFeatureStates() { return false; @@ -54,11 +41,6 @@ protected final boolean preserveIndicesUponCompletion() { return true; } - @Override - protected final boolean preserveDataStreamsUponCompletion() { - return true; - } - @Override protected final boolean preserveReposUponCompletion() { return true; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java similarity index 97% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java rename to qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 39700514cd79f..077eae88fba02 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -41,7 +41,6 @@ import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY; -import static org.elasticsearch.upgrades.UpgradeWithOldIndexSettingsIT.updateIndexSettingsPermittingSlowlogDeprecationWarning; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.in; @@ -747,4 +746,23 @@ public void testSoftDeletesDisabledWarning() throws Exception { ensureGreen(indexName); indexDocs(indexName, randomInt(100), 
randomInt(100)); } + + /* + * Copied from UpgradeWithOldIndexSettingsIT in the new format + */ + private static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { + Request request = new Request("PUT", "/" + index + "/_settings"); + request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); + if (UPGRADE_FROM_VERSION.before(Version.V_7_17_9)) { + // There is a bug (fixed in 7.17.9 and 8.7.0 where deprecation warnings could leak into ClusterApplierService#applyChanges) + // Below warnings are set (and leaking) from an index in this test case + request.setOptions(expectVersionSpecificWarnings(v -> { + v.compatible( + "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will be removed in a future release! " + + "See the breaking changes documentation for the next major version." + ); + })); + } + client().performRequest(request); + } } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java similarity index 95% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java rename to qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 0f829f20fe3c4..068747d5a4824 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -40,11 +40,6 @@ protected boolean preserveTemplatesUponCompletion() { return true; } - @Override - protected boolean preserveDataStreamsUponCompletion() { - return true; - } - public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff 
--git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/20_camel_case_on_format.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/mixed_cluster/30_vector_search.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml similarity index 100% rename from 
qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/20_camel_case_on_format.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml similarity index 99% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml index 11e9fdc2cca95..b471fa56a47a5 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml +++ b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml @@ -11,7 +11,6 @@ bdv: type: dense_vector dims: 3 - index: false knn: type: dense_vector dims: 3 @@ -126,7 +125,6 @@ bdv: type: dense_vector element_type: byte - index: false dims: 3 knn: type: dense_vector diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/20_camel_case_on_format.yml diff 
--git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml b/qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml similarity index 100% rename from qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml rename to qa/rolling-upgrade-legacy/src/test/resources/rest-api-spec/test/upgraded_cluster/30_vector_search.yml diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index d3078dd8c9381..ea582ea7fc213 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -6,135 +6,25 @@ * Side Public License, v 1. */ - -import org.elasticsearch.gradle.Version -import org.elasticsearch.gradle.internal.BwcVersions import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-test-artifact-base' apply plugin: 'elasticsearch.bwc-test' -apply plugin: 'elasticsearch.rest-resources' - -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> - /* - * The goal here is to: - *
    - *
  • start three nodes on the old version - *
  • run tests with systemProperty 'tests.rest.suite', 'old_cluster' - *
  • upgrade one node - *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' - *
  • upgrade one more node - *
  • run tests with systemProperty 'tests.rest.suite', 'mixed_cluster' again - *
  • updgrade the last node - *
  • run tests with systemProperty 'tests.rest.suite', 'upgraded_cluster' - *
- */ - - def baseCluster = testClusters.register(baseName) { - versions = [bwcVersion.toString(), project.version] - numberOfNodes = 3 - - setting 'repositories.url.allowed_urls', 'http://snapshot.test*' - setting 'path.repo', "${buildDir}/cluster/shared/repo/${baseName}" - setting 'xpack.security.enabled', 'false' - setting 'logger.org.elasticsearch.cluster.service.MasterService', 'TRACE' - setting 'logger.org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceShardsAllocator', 'TRACE' - setting 'logger.org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders', 'TRACE' - requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0") - } - - String oldVersion = bwcVersion.toString() - - tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - dependsOn "processTestResources" - useCluster baseCluster - mustRunAfter("precommit") - doFirst { - delete("${buildDir}/cluster/shared/repo/${baseName}") - } - def excludeList = [] - systemProperty 'tests.rest.suite', 'old_cluster' - systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - if (bwcVersion.before("8.4.0")) { - excludeList.addAll(["old_cluster/30_vector_search/*"]) - } else if (bwcVersion.before("8.6.0")) { - excludeList.addAll(["old_cluster/30_vector_search/Create indexed byte vectors and search"]) - } - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } - } - - tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { - dependsOn "${baseName}#oldClusterTest" - useCluster baseCluster - doFirst { - baseCluster.get().nextNodeToNextVersion() - } - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.upgrade_from_version', oldVersion - systemProperty 'tests.first_round', 
'true' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - def excludeList = [] - if (bwcVersion.before("8.4.0")) { - excludeList.addAll(["mixed_cluster/30_vector_search/*"]) - } else if (bwcVersion.before("8.6.0")) { - excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) - } - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } - } - tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { - dependsOn "${baseName}#oneThirdUpgradedTest" - useCluster baseCluster - doFirst { - baseCluster.get().nextNodeToNextVersion() - } - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.upgrade_from_version', oldVersion - systemProperty 'tests.first_round', 'false' - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - def excludeList = [] - if (bwcVersion.before("8.4.0")) { - excludeList.addAll(["mixed_cluster/30_vector_search/*"]) - } else if (bwcVersion.before("8.6.0")) { - excludeList.addAll(["mixed_cluster/30_vector_search/Search byte indices created in old cluster"]) - } - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } - } +testArtifacts { + registerTestArtifactFromSourceSet(sourceSets.javaRestTest) +} - tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - dependsOn "${baseName}#twoThirdsUpgradedTest" - doFirst { - baseCluster.get().nextNodeToNextVersion() - } - useCluster testClusters.named(baseName) - systemProperty 'tests.rest.suite', 'upgraded_cluster' - systemProperty 'tests.upgrade_from_version', oldVersion - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> 
c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - def excludeList = [] - if (bwcVersion.before("8.4.0")) { - excludeList.addAll(["upgraded_cluster/30_vector_search/*"]) - } else if (bwcVersion.before("8.6.0")) { - excludeList.addAll(["upgraded_cluster/30_vector_search/Search byte indices created in old cluster"]) - } - if (excludeList.isEmpty() == false) { - systemProperty 'tests.rest.blacklist', excludeList.join(',') - } +BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) } +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn tasks.named("${baseName}#upgradedClusterTest") - } +testClusters.configureEach { + setting 'xpack.security.enabled', 'false' } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java similarity index 82% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java rename to qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java index 5bafccf7aee1b..e945d457986d0 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/DesiredNodesUpgradeIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.desirednodes.UpdateDesiredNodesRequest; import org.elasticsearch.client.Request; @@ -25,24 +27,33 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Objects; import static 
org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; -public class DesiredNodesUpgradeIT extends AbstractRollingTestCase { +public class DesiredNodesUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + private final int desiredNodesVersion; + + public DesiredNodesUpgradeIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + desiredNodesVersion = Objects.requireNonNullElse(upgradeNode, -1) + 2; + } + private enum ProcessorsPrecision { DOUBLE, FLOAT } public void testUpgradeDesiredNodes() throws Exception { - assumeTrue("Desired nodes was introduced in 8.1", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_1_0)); + assumeTrue("Desired nodes was introduced in 8.1", getOldClusterVersion().onOrAfter(Version.V_8_1_0)); - if (UPGRADE_FROM_VERSION.onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { + if (getOldClusterVersion().onOrAfter(Processors.DOUBLE_PROCESSORS_SUPPORT_VERSION)) { assertUpgradedNodesCanReadDesiredNodes(); - } else if (UPGRADE_FROM_VERSION.onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { + } else if (getOldClusterVersion().onOrAfter(DesiredNode.RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION)) { assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent(); } else { assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions(); @@ -50,13 +61,7 @@ public void testUpgradeDesiredNodes() throws Exception { } private void assertUpgradedNodesCanReadDesiredNodes() throws Exception { - final int desiredNodesVersion = switch (CLUSTER_TYPE) { - case OLD -> 1; - case MIXED -> FIRST_MIXED_ROUND ? 
2 : 3; - case UPGRADED -> 4; - }; - - if (CLUSTER_TYPE != ClusterType.OLD) { + if (isMixedCluster() || isUpgradedCluster()) { final Map desiredNodes = getLatestDesiredNodes(); final String historyId = extractValue(desiredNodes, "history_id"); final int version = extractValue(desiredNodes, "version"); @@ -83,13 +88,7 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws ) .toList(); - final int desiredNodesVersion = switch (CLUSTER_TYPE) { - case OLD -> 1; - case MIXED -> FIRST_MIXED_ROUND ? 2 : 3; - case UPGRADED -> 4; - }; - - if (CLUSTER_TYPE != ClusterType.OLD) { + if (isMixedCluster() || isUpgradedCluster()) { updateDesiredNodes(desiredNodes, desiredNodesVersion - 1); } for (int i = 0; i < 2; i++) { @@ -100,28 +99,25 @@ private void assertDesiredNodesUpdatedWithRoundedUpFloatsAreIdempotent() throws final int latestDesiredNodesVersion = extractValue(latestDesiredNodes, "version"); assertThat(latestDesiredNodesVersion, is(equalTo(desiredNodesVersion))); - if (CLUSTER_TYPE == ClusterType.UPGRADED) { + if (isUpgradedCluster()) { assertAllDesiredNodesAreActualized(); } } private void assertDesiredNodesWithFloatProcessorsAreRejectedInOlderVersions() throws Exception { - switch (CLUSTER_TYPE) { - case OLD -> addClusterNodesToDesiredNodesWithIntegerProcessors(1); - case MIXED -> { - int version = FIRST_MIXED_ROUND ? 
2 : 3; - // Processor ranges or float processors are forbidden during upgrades: 8.2 -> 8.3 clusters - final var responseException = expectThrows( - ResponseException.class, - () -> addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(version, ProcessorsPrecision.FLOAT) - ); - final var statusCode = responseException.getResponse().getStatusLine().getStatusCode(); - assertThat(statusCode, is(equalTo(400))); - } - case UPGRADED -> { - assertAllDesiredNodesAreActualized(); - addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(4, ProcessorsPrecision.FLOAT); - } + if (isOldCluster()) { + addClusterNodesToDesiredNodesWithIntegerProcessors(1); + } else if (isMixedCluster()) { + // Processor ranges or float processors are forbidden during upgrades: 8.2 -> 8.3 clusters + final var responseException = expectThrows( + ResponseException.class, + () -> addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(desiredNodesVersion, ProcessorsPrecision.FLOAT) + ); + final var statusCode = responseException.getResponse().getStatusLine().getStatusCode(); + assertThat(statusCode, is(equalTo(400))); + } else { + assertAllDesiredNodesAreActualized(); + addClusterNodesToDesiredNodesWithProcessorsOrProcessorRanges(4, ProcessorsPrecision.FLOAT); } getLatestDesiredNodes(); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java similarity index 90% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java rename to qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 588802fb50709..307e9946b7601 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import 
com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction; import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; @@ -21,14 +23,17 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -public class FeatureUpgradeIT extends AbstractRollingTestCase { +public class FeatureUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + public FeatureUpgradeIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } - @SuppressWarnings("unchecked") public void testGetFeatureUpgradeStatus() throws Exception { final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " + "access to system indices will be prevented by default"; - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { // setup - put something in the tasks index // create index Request createTestIndex = new Request("PUT", "/feature_test_index_old"); @@ -79,7 +84,7 @@ public void testGetFeatureUpgradeStatus() throws Exception { } }); - } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + } else if (isUpgradedCluster()) { // check results assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_migration/system_features"); @@ -95,7 +100,7 @@ public void testGetFeatureUpgradeStatus() throws Exception { assertThat(feature, aMapWithSize(4)); assertThat(feature.get("minimum_index_version"), equalTo(getOldClusterIndexVersion().toString())); - if (UPGRADE_FROM_VERSION.before(TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION)) { + if (getOldClusterVersion().before(TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION)) { assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { assertThat(feature.get("migration_status"), equalTo("NO_MIGRATION_NEEDED")); @@ -103,5 +108,4 @@ public void 
testGetFeatureUpgradeStatus() throws Exception { }); } } - } diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FieldCapsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java similarity index 94% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FieldCapsIT.java rename to qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java index 83865222a8867..333cff3c4e039 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FieldCapsIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/FieldCapsIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.HttpHost; import org.elasticsearch.Version; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; @@ -36,15 +38,17 @@ * In 8.2 we also added the ability to filter fields by type and metadata, with some post-hoc filtering applied on * the co-ordinating node if older nodes were included in the system */ -public class FieldCapsIT extends AbstractRollingTestCase { - private static boolean indicesCreated = false; +public class FieldCapsIT extends ParameterizedRollingUpgradeTestCase { + + public FieldCapsIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } + + private static boolean oldIndicesCreated; + private static boolean newIndicesCreated; @Before public void setupIndices() throws Exception { - if (indicesCreated) { - return; - } - indicesCreated = true; final String redMapping = """ "properties": { "red_field": { "type": "keyword" }, @@ -63,7 +67,7 @@ public void setupIndices() throws Exception { "timestamp": {"type": "date"} } """; - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster() && oldIndicesCreated == false) { createIndex("old_red_1", Settings.EMPTY, redMapping); createIndex("old_red_2", Settings.EMPTY, redMapping); 
createIndex("old_red_empty", Settings.EMPTY, redMapping); @@ -78,7 +82,8 @@ public void setupIndices() throws Exception { ); assertOK(client().performRequest(indexRequest)); } - } else if (CLUSTER_TYPE == ClusterType.MIXED && FIRST_MIXED_ROUND) { + oldIndicesCreated = true; + } else if (isFirstMixedCluster() && newIndicesCreated == false) { createIndex("new_red_1", Settings.EMPTY, redMapping); createIndex("new_red_2", Settings.EMPTY, redMapping); createIndex("new_red_empty", Settings.EMPTY, redMapping); @@ -93,6 +98,7 @@ public void setupIndices() throws Exception { ); assertOK(client().performRequest(indexRequest)); } + newIndicesCreated = true; } } @@ -149,7 +155,7 @@ public void testOldIndicesWithIndexFilter() throws Exception { } public void testNewIndicesOnly() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); { FieldCapabilitiesResponse resp = fieldCaps(List.of("new_red_*"), List.of("*"), null, null, null); assertThat(resp.getIndices(), equalTo(new String[] { "new_red_1", "new_red_2", "new_red_empty" })); @@ -177,7 +183,7 @@ public void testNewIndicesOnly() throws Exception { } public void testNewIndicesOnlyWithIndexFilter() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); final QueryBuilder indexFilter = QueryBuilders.rangeQuery("timestamp").gte("2020-01-01").lte("2020-12-12"); { FieldCapabilitiesResponse resp = fieldCaps(List.of("new_red_*"), List.of("*"), indexFilter, null, null); @@ -203,7 +209,7 @@ public void testNewIndicesOnlyWithIndexFilter() throws Exception { } public void testAllIndices() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); FieldCapabilitiesResponse resp = 
fieldCaps(List.of("old_*", "new_*"), List.of("*"), null, null, null); assertThat( resp.getIndices(), @@ -235,7 +241,7 @@ public void testAllIndices() throws Exception { } public void testAllIndicesWithIndexFilter() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); final QueryBuilder indexFilter = QueryBuilders.rangeQuery("timestamp").gte("2020-01-01").lte("2020-12-12"); FieldCapabilitiesResponse resp = fieldCaps(List.of("old_*", "new_*"), List.of("*"), indexFilter, null, null); assertThat( @@ -285,7 +291,7 @@ private RestClient getUpgradedNodeClient() throws IOException { // because we are testing that the upgraded node will correctly apply filtering // to responses from older nodes that don't understand the filter parameters public void testAllIndicesWithFieldTypeFilter() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); RestClient restClient = getUpgradedNodeClient(); FieldCapabilitiesResponse resp = fieldCaps(restClient, List.of("old_*", "new_*"), List.of("*"), null, "keyword", null); assertThat(resp.getField("red_field").keySet(), contains("keyword")); @@ -298,7 +304,7 @@ public void testAllIndicesWithFieldTypeFilter() throws Exception { // because we are testing that the upgraded node will correctly apply filtering // to responses from older nodes that don't understand the filter parameters public void testAllIndicesWithExclusionFilter() throws Exception { - assumeFalse("required mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.OLD); + assumeFalse("required mixed or upgraded cluster", isOldCluster()); RestClient client = getUpgradedNodeClient(); { FieldCapabilitiesResponse resp = fieldCaps(client, List.of("old_*", "new_*"), List.of("*"), null, null, null); diff --git 
a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java similarity index 52% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java rename to qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java index b860e53d447b5..ecd327b0d66c8 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/IndexingIT.java @@ -7,6 +7,8 @@ */ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; @@ -15,7 +17,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.Booleans; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.ListMatcher; import org.elasticsearch.xcontent.XContentBuilder; @@ -40,39 +41,36 @@ /** * Basic test that indexed documents survive the rolling restart. See - * {@link RecoveryIT} for much more in depth testing of the mechanism + * {@code RecoveryIT} for much more in depth testing of the mechanism * by which they survive. *

* This test is an almost exact copy of IndexingIT in the * xpack rolling restart tests. We should work on a way to remove this * duplication but for now we have no real way to share code. */ -public class IndexingIT extends AbstractRollingTestCase { +public class IndexingIT extends ParameterizedRollingUpgradeTestCase { + + public IndexingIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } public void testIndexing() throws IOException { - switch (CLUSTER_TYPE) { - case OLD: - break; - case MIXED: - Request waitForYellow = new Request("GET", "/_cluster/health"); - waitForYellow.addParameter("wait_for_nodes", "3"); - waitForYellow.addParameter("wait_for_status", "yellow"); - client().performRequest(waitForYellow); - break; - case UPGRADED: - Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); - waitForGreen.addParameter("wait_for_nodes", "3"); - waitForGreen.addParameter("wait_for_status", "green"); - // wait for long enough that we give delayed unassigned shards to stop being delayed - waitForGreen.addParameter("timeout", "70s"); - waitForGreen.addParameter("level", "shards"); - client().performRequest(waitForGreen); - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + if (isMixedCluster()) { + Request waitForYellow = new Request("GET", "/_cluster/health"); + waitForYellow.addParameter("wait_for_nodes", "3"); + waitForYellow.addParameter("wait_for_status", "yellow"); + client().performRequest(waitForYellow); + } else if (isUpgradedCluster()) { + Request waitForGreen = new Request("GET", "/_cluster/health/test_index,index_with_replicas,empty_index"); + waitForGreen.addParameter("wait_for_nodes", "3"); + waitForGreen.addParameter("wait_for_status", "green"); + // wait for long enough that we give delayed unassigned shards to stop being delayed + waitForGreen.addParameter("timeout", "70s"); + waitForGreen.addParameter("level", "shards"); 
+ client().performRequest(waitForGreen); } - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { Request createTestIndex = new Request("PUT", "/test_index"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); useIgnoreMultipleMatchingTemplatesWarningsHandler(createTestIndex); @@ -95,30 +93,20 @@ public void testIndexing() throws IOException { } int expectedCount; - switch (CLUSTER_TYPE) { - case OLD: - expectedCount = 5; - break; - case MIXED: - if (Booleans.parseBoolean(System.getProperty("tests.first_round"))) { - expectedCount = 5; - } else { - expectedCount = 10; - } - break; - case UPGRADED: - expectedCount = 15; - break; - default: - throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + if (isOldCluster() || isFirstMixedCluster()) { + expectedCount = 5; + } else if (isMixedCluster()) { + expectedCount = 10; + } else { + expectedCount = 15; } assertCount("test_index", expectedCount); assertCount("index_with_replicas", 5); assertCount("empty_index", 0); - if (CLUSTER_TYPE != ClusterType.OLD) { - bulk("test_index", "_" + CLUSTER_TYPE, 5); + if (isOldCluster() == false) { + bulk("test_index", "_" + (isMixedCluster() ? 
"MIXED" : "UPGRADED"), 5); Request toBeDeleted = new Request("PUT", "/test_index/_doc/to_be_deleted"); toBeDeleted.addParameter("refresh", "true"); toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); @@ -143,82 +131,76 @@ public void testAutoIdWithOpTypeCreate() throws IOException { bulk.addParameter("refresh", "true"); bulk.setJsonEntity(b); - switch (CLUSTER_TYPE) { - case OLD -> { - Request createTestIndex = new Request("PUT", "/" + indexName); - createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); - client().performRequest(createTestIndex); - } - case MIXED -> { - Request waitForGreen = new Request("GET", "/_cluster/health"); - waitForGreen.addParameter("wait_for_nodes", "3"); - client().performRequest(waitForGreen); - Version minNodeVersion = minNodeVersion(); - if (minNodeVersion.before(Version.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat( - e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node - .or(containsString("an id must be provided if version type or value are set")) - ); - } else { - client().performRequest(bulk); - } + if (isOldCluster()) { + Request createTestIndex = new Request("PUT", "/" + indexName); + createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); + client().performRequest(createTestIndex); + } else if (isMixedCluster()) { + Request waitForGreen = new Request("GET", "/_cluster/health"); + waitForGreen.addParameter("wait_for_nodes", "3"); + client().performRequest(waitForGreen); + Version minNodeVersion = minNodeVersion(); + if (minNodeVersion.before(Version.V_7_5_0)) { + ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); + assertEquals(400, 
e.getResponse().getStatusLine().getStatusCode()); + assertThat( + e.getMessage(), + // if request goes to 7.5+ node + either(containsString("optype create not supported for indexing requests without explicit id until")) + // if request goes to < 7.5 node + .or(containsString("an id must be provided if version type or value are set")) + ); + } else { + client().performRequest(bulk); } - case UPGRADED -> client().performRequest(bulk); - default -> throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } else if (isUpgradedCluster()) { + client().performRequest(bulk); } } public void testDateNanosFormatUpgrade() throws IOException { final String indexName = "test_date_nanos"; - switch (CLUSTER_TYPE) { - case OLD -> { - Request createIndex = new Request("PUT", "/" + indexName); - XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) - .startObject() - .startObject("mappings") - .startObject("properties") - .startObject("date") - .field("type", "date") - .endObject() - .startObject("date_nanos") - .field("type", "date_nanos") - .endObject() - .endObject() - .endObject() - .endObject(); - createIndex.setJsonEntity(Strings.toString(mappings)); - client().performRequest(createIndex); - Request index = new Request("POST", "/" + indexName + "/_doc/"); - XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) - .startObject() - .field("date", "2015-01-01T12:10:30.123456789Z") - .field("date_nanos", "2015-01-01T12:10:30.123456789Z") - .endObject(); - index.addParameter("refresh", "true"); - index.setJsonEntity(Strings.toString(doc)); - client().performRequest(index); - } - case UPGRADED -> { - Request search = new Request("POST", "/" + indexName + "/_search"); - XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) - .startObject() - .array("fields", new String[] { "date", "date_nanos" }) - .endObject(); - search.setJsonEntity(Strings.toString(query)); - Map response = 
entityAsMap(client().performRequest(search)); - Map bestHit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", response))).get(0); - List date = (List) XContentMapValues.extractValue("fields.date", bestHit); - assertThat(date.size(), equalTo(1)); - assertThat(date.get(0), equalTo("2015-01-01T12:10:30.123Z")); - List dateNanos = (List) XContentMapValues.extractValue("fields.date_nanos", bestHit); - assertThat(dateNanos.size(), equalTo(1)); - assertThat(dateNanos.get(0), equalTo("2015-01-01T12:10:30.123456789Z")); - } + if (isOldCluster()) { + Request createIndex = new Request("PUT", "/" + indexName); + XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .startObject("mappings") + .startObject("properties") + .startObject("date") + .field("type", "date") + .endObject() + .startObject("date_nanos") + .field("type", "date_nanos") + .endObject() + .endObject() + .endObject() + .endObject(); + createIndex.setJsonEntity(Strings.toString(mappings)); + client().performRequest(createIndex); + Request index = new Request("POST", "/" + indexName + "/_doc/"); + XContentBuilder doc = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .field("date", "2015-01-01T12:10:30.123456789Z") + .field("date_nanos", "2015-01-01T12:10:30.123456789Z") + .endObject(); + index.addParameter("refresh", "true"); + index.setJsonEntity(Strings.toString(doc)); + client().performRequest(index); + } else if (isUpgradedCluster()) { + Request search = new Request("POST", "/" + indexName + "/_search"); + XContentBuilder query = XContentBuilder.builder(XContentType.JSON.xContent()) + .startObject() + .array("fields", new String[] { "date", "date_nanos" }) + .endObject(); + search.setJsonEntity(Strings.toString(query)); + Map response = entityAsMap(client().performRequest(search)); + Map bestHit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", response))).get(0); + List date = (List) 
XContentMapValues.extractValue("fields.date", bestHit); + assertThat(date.size(), equalTo(1)); + assertThat(date.get(0), equalTo("2015-01-01T12:10:30.123Z")); + List dateNanos = (List) XContentMapValues.extractValue("fields.date_nanos", bestHit); + assertThat(dateNanos.size(), equalTo(1)); + assertThat(dateNanos.get(0), equalTo("2015-01-01T12:10:30.123456789Z")); } } @@ -247,51 +229,45 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio } public void testTsdb() throws IOException { - assumeTrue("indexing time series indices changed in 8.2.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_2_0)); + assumeTrue("indexing time series indices changed in 8.2.0", getOldClusterVersion().onOrAfter(Version.V_8_2_0)); StringBuilder bulk = new StringBuilder(); - switch (CLUSTER_TYPE) { - case OLD -> { - createTsdbIndex(); - tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[0], TSDB_TIMES[1], 0.1); - tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[0], TSDB_TIMES[1], -0.1); - bulk("tsdb", bulk.toString()); - assertTsdbAgg(closeTo(215.95, 0.005), closeTo(-215.95, 0.005)); - return; - } - case MIXED -> { - if (FIRST_MIXED_ROUND) { - tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[1], TSDB_TIMES[2], 0.1); - tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[1], TSDB_TIMES[2], -0.1); - tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[0], TSDB_TIMES[2], 1.1); - bulk("tsdb", bulk.toString()); - assertTsdbAgg(closeTo(217.45, 0.005), closeTo(-217.45, 0.005), closeTo(2391.95, 0.005)); - return; - } - tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[2], TSDB_TIMES[3], 0.1); - tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[2], TSDB_TIMES[3], -0.1); - tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[2], TSDB_TIMES[3], 1.1); - tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[0], TSDB_TIMES[3], 10); - bulk("tsdb", bulk.toString()); - assertTsdbAgg(closeTo(218.95, 0.005), closeTo(-218.95, 0.005), closeTo(2408.45, 0.005), closeTo(21895, 0.5)); - return; - } - case UPGRADED -> { - tsdbBulk(bulk, 
TSDB_DIMS.get(0), TSDB_TIMES[3], TSDB_TIMES[4], 0.1); - tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[3], TSDB_TIMES[4], -0.1); - tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[3], TSDB_TIMES[4], 1.1); - tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[3], TSDB_TIMES[4], 10); - tsdbBulk(bulk, TSDB_DIMS.get(4), TSDB_TIMES[0], TSDB_TIMES[4], -5); - bulk("tsdb", bulk.toString()); - assertTsdbAgg( - closeTo(220.45, 0.005), - closeTo(-220.45, 0.005), - closeTo(2424.95, 0.005), - closeTo(22045, 0.5), - closeTo(-11022.5, 0.5) - ); - return; - } + if (isOldCluster()) { + createTsdbIndex(); + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[0], TSDB_TIMES[1], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[0], TSDB_TIMES[1], -0.1); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(215.95, 0.005), closeTo(-215.95, 0.005)); + return; + } else if (isFirstMixedCluster()) { + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[1], TSDB_TIMES[2], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[1], TSDB_TIMES[2], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[0], TSDB_TIMES[2], 1.1); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(217.45, 0.005), closeTo(-217.45, 0.005), closeTo(2391.95, 0.005)); + + } else if (isMixedCluster()) { + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[2], TSDB_TIMES[3], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[2], TSDB_TIMES[3], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[2], TSDB_TIMES[3], 1.1); + tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[0], TSDB_TIMES[3], 10); + bulk("tsdb", bulk.toString()); + assertTsdbAgg(closeTo(218.95, 0.005), closeTo(-218.95, 0.005), closeTo(2408.45, 0.005), closeTo(21895, 0.5)); + return; + } else { + tsdbBulk(bulk, TSDB_DIMS.get(0), TSDB_TIMES[3], TSDB_TIMES[4], 0.1); + tsdbBulk(bulk, TSDB_DIMS.get(1), TSDB_TIMES[3], TSDB_TIMES[4], -0.1); + tsdbBulk(bulk, TSDB_DIMS.get(2), TSDB_TIMES[3], TSDB_TIMES[4], 1.1); + tsdbBulk(bulk, TSDB_DIMS.get(3), TSDB_TIMES[3], TSDB_TIMES[4], 10); + tsdbBulk(bulk, 
TSDB_DIMS.get(4), TSDB_TIMES[0], TSDB_TIMES[4], -5); + bulk("tsdb", bulk.toString()); + assertTsdbAgg( + closeTo(220.45, 0.005), + closeTo(-220.45, 0.005), + closeTo(2424.95, 0.005), + closeTo(22045, 0.5), + closeTo(-11022.5, 0.5) + ); } } @@ -361,67 +337,60 @@ private void assertTsdbAgg(Matcher... expected) throws IOException { } public void testSyntheticSource() throws IOException { - assumeTrue("added in 8.4.0", UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_4_0)); - - switch (CLUSTER_TYPE) { - case OLD -> { - Request createIndex = new Request("PUT", "/synthetic"); - XContentBuilder indexSpec = XContentBuilder.builder(XContentType.JSON.xContent()).startObject(); - indexSpec.startObject("mappings"); - { - indexSpec.startObject("_source").field("mode", "synthetic").endObject(); - indexSpec.startObject("properties").startObject("kwd").field("type", "keyword").endObject().endObject(); - } - indexSpec.endObject(); - createIndex.setJsonEntity(Strings.toString(indexSpec.endObject())); - client().performRequest(createIndex); - bulk("synthetic", """ - {"index": {"_index": "synthetic", "_id": "old"}} - {"kwd": "old", "int": -12} - """); - break; - } - case MIXED -> { - if (FIRST_MIXED_ROUND) { - bulk("synthetic", """ - {"index": {"_index": "synthetic", "_id": "mixed_1"}} - {"kwd": "mixed_1", "int": 22} - """); - } else { - bulk("synthetic", """ - {"index": {"_index": "synthetic", "_id": "mixed_2"}} - {"kwd": "mixed_2", "int": 33} - """); - } - break; - } - case UPGRADED -> { - bulk("synthetic", """ - {"index": {"_index": "synthetic", "_id": "new"}} - {"kwd": "new", "int": 21341325} - """); + assumeTrue("added in 8.4.0", getOldClusterVersion().onOrAfter(Version.V_8_4_0)); + + if (isOldCluster()) { + Request createIndex = new Request("PUT", "/synthetic"); + XContentBuilder indexSpec = XContentBuilder.builder(XContentType.JSON.xContent()).startObject(); + indexSpec.startObject("mappings"); + { + indexSpec.startObject("_source").field("mode", "synthetic").endObject(); + 
indexSpec.startObject("properties").startObject("kwd").field("type", "keyword").endObject().endObject(); } + indexSpec.endObject(); + createIndex.setJsonEntity(Strings.toString(indexSpec.endObject())); + client().performRequest(createIndex); + bulk("synthetic", """ + {"index": {"_index": "synthetic", "_id": "old"}} + {"kwd": "old", "int": -12} + """); + } else if (isFirstMixedCluster()) { + bulk("synthetic", """ + {"index": {"_index": "synthetic", "_id": "mixed_1"}} + {"kwd": "mixed_1", "int": 22} + """); + } else if (isMixedCluster()) { + bulk("synthetic", """ + {"index": {"_index": "synthetic", "_id": "mixed_2"}} + {"kwd": "mixed_2", "int": 33} + """); + + } else { + bulk("synthetic", """ + {"index": {"_index": "synthetic", "_id": "new"}} + {"kwd": "new", "int": 21341325} + """); } assertMap( entityAsMap(client().performRequest(new Request("GET", "/synthetic/_doc/old"))), matchesMap().extraOk().entry("_source", matchesMap().entry("kwd", "old").entry("int", -12)) ); - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { return; } assertMap( entityAsMap(client().performRequest(new Request("GET", "/synthetic/_doc/mixed_1"))), matchesMap().extraOk().entry("_source", matchesMap().entry("kwd", "mixed_1").entry("int", 22)) ); - if (CLUSTER_TYPE == ClusterType.MIXED && FIRST_MIXED_ROUND) { + if (isFirstMixedCluster()) { return; } assertMap( entityAsMap(client().performRequest(new Request("GET", "/synthetic/_doc/mixed_2"))), matchesMap().extraOk().entry("_source", matchesMap().entry("kwd", "mixed_2").entry("int", 33)) ); - if (CLUSTER_TYPE == ClusterType.MIXED) { + if (isMixedCluster()) { return; } assertMap( diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedRollingUpgradeTestCase.java new file mode 100644 index 0000000000000..e1500358327c5 --- /dev/null +++ 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.upgrades;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.Version;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.ObjectPath;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.rules.RuleChain;
import org.junit.rules.TemporaryFolder;
import org.junit.rules.TestRule;

import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Supplier;
import java.util.stream.IntStream;
import java.util.stream.Stream;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;

/**
 * Base class for parameterized rolling-upgrade REST tests.
 *
 * A single shared {@link ElasticsearchCluster} of {@code NODE_NUM} nodes is started on the
 * old version. Each test class runs once per parameter value {@code null, 0, 1, ..., NODE_NUM - 1}
 * (see {@link #parameters()}): {@code null} means "run against the un-upgraded cluster", while
 * value {@code n} means "run after node {@code n} (and all lower-numbered nodes) have been
 * upgraded to the current version". Upgrade progress is tracked in static state
 * ({@code upgradedNodes}, {@code upgradeFailed}) that is shared across the parameterized
 * instances of one test class and reset in {@link #resetNodes()}.
 */
public abstract class ParameterizedRollingUpgradeTestCase extends ESRestTestCase {
    // Version the cluster starts from, injected by the build via a system property.
    private static final Version OLD_CLUSTER_VERSION = Version.fromString(System.getProperty("tests.old_cluster_version"));

    // Snapshot repository location shared by all tests; wired into the cluster via "path.repo".
    private static final TemporaryFolder repoDirectory = new TemporaryFolder();

    private static final int NODE_NUM = 3;

    private static final ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .version(getOldClusterTestVersion())
        .nodes(NODE_NUM)
        // Supplier: the folder does not exist yet when this builder runs; resolve the path lazily.
        .setting("path.repo", new Supplier<>() {
            @Override
            @SuppressForbidden(reason = "TemporaryFolder only has io.File methods, not nio.File")
            public String get() {
                return repoDirectory.getRoot().getPath();
            }
        })
        .setting("xpack.security.enabled", "false")
        .feature(FeatureFlag.TIME_SERIES_MODE)
        .build();

    // The repo directory must be created before the cluster starts (and torn down after it stops).
    @ClassRule
    public static TestRule ruleChain = RuleChain.outerRule(repoDirectory).around(cluster);

    /** One run per upgrade stage: null (old cluster) followed by each node index in order. */
    @ParametersFactory(shuffle = false)
    public static Iterable<Object[]> parameters() {
        return Stream.concat(Stream.of((Integer) null), IntStream.range(0, NODE_NUM).boxed()).map(n -> new Object[] { n }).toList();
    }

    // Indices of nodes already upgraded in this class's run; shared across parameterized instances.
    private static final Set<Integer> upgradedNodes = new HashSet<>();
    private static boolean upgradeFailed = false;
    // Index version of the old cluster, captured once before any node is upgraded.
    private static IndexVersion oldIndexVersion;

    private final Integer requestedUpgradeNode;

    protected ParameterizedRollingUpgradeTestCase(@Name("upgradeNode") Integer upgradeNode) {
        this.requestedUpgradeNode = upgradeNode;
    }

    /**
     * Captures the (single, shared) index version of the old cluster before the first upgrade,
     * asserting that all nodes agree on it.
     */
    @Before
    public void extractOldIndexVersion() throws Exception {
        if (oldIndexVersion == null && upgradedNodes.isEmpty()) {
            IndexVersion indexVersion = null;   // these should all be the same version

            Request request = new Request("GET", "_nodes");
            request.addParameter("filter_path", "nodes.*.index_version,nodes.*.name");
            Response response = client().performRequest(request);
            ObjectPath objectPath = ObjectPath.createFromResponse(response);
            Map<String, Object> nodeMap = objectPath.evaluate("nodes");
            for (String id : nodeMap.keySet()) {
                Number ix = objectPath.evaluate("nodes." + id + ".index_version");
                IndexVersion version;
                if (ix != null) {
                    version = IndexVersion.fromId(ix.intValue());
                } else {
                    // it doesn't have index version (pre 8.11) - just infer it from the release version
                    version = IndexVersion.fromId(getOldClusterVersion().id);
                }

                if (indexVersion == null) {
                    indexVersion = version;
                } else {
                    String name = objectPath.evaluate("nodes." + id + ".name");
                    assertThat("Node " + name + " has a different index version to other nodes", version, equalTo(indexVersion));
                }
            }

            assertThat("Index version could not be read", indexVersion, notNullValue());
            oldIndexVersion = indexVersion;
        }
    }

    /**
     * Brings the cluster to the upgrade stage requested by this instance's parameter:
     * upgrades every not-yet-upgraded node up to and including {@code requestedUpgradeNode}.
     * If any upgrade fails, remaining stages are skipped via {@code upgradeFailed}.
     */
    @Before
    public void upgradeNode() throws Exception {
        // Skip remaining tests if upgrade failed
        assumeFalse("Cluster upgrade failed", upgradeFailed);

        if (requestedUpgradeNode != null && upgradedNodes.contains(requestedUpgradeNode) == false) {
            closeClients();
            // we might be running a specific upgrade test by itself - check previous nodes too
            for (int n = 0; n <= requestedUpgradeNode; n++) {
                if (upgradedNodes.add(n)) {
                    try {
                        cluster.upgradeNodeToVersion(n, Version.CURRENT);
                    } catch (Exception e) {
                        upgradeFailed = true;
                        throw e;
                    }
                }
            }
            initClient();
        }
    }

    /** Resets the shared static upgrade state so the next test class starts from a clean slate. */
    @AfterClass
    public static void resetNodes() {
        oldIndexVersion = null;
        upgradedNodes.clear();
        upgradeFailed = false;
    }

    /** The old cluster's release version as a server {@link org.elasticsearch.Version}. */
    protected static org.elasticsearch.Version getOldClusterVersion() {
        return org.elasticsearch.Version.fromString(OLD_CLUSTER_VERSION.toString());
    }

    /** The old cluster's index version; only valid after {@link #extractOldIndexVersion()} has run. */
    protected static IndexVersion getOldClusterIndexVersion() {
        assert oldIndexVersion != null;
        return oldIndexVersion;
    }

    /** The old cluster's version as a test-framework {@link Version}. */
    protected static Version getOldClusterTestVersion() {
        return Version.fromString(OLD_CLUSTER_VERSION.toString());
    }

    /** True while no node has been upgraded yet. */
    protected static boolean isOldCluster() {
        return upgradedNodes.isEmpty();
    }

    /** True when exactly one node has been upgraded (first mixed round). */
    protected static boolean isFirstMixedCluster() {
        return upgradedNodes.size() == 1;
    }

    /** True when some but not all nodes have been upgraded. */
    protected static boolean isMixedCluster() {
        return upgradedNodes.isEmpty() == false && upgradedNodes.size() < NODE_NUM;
    }

    /** True once every node has been upgraded. */
    protected static boolean isUpgradedCluster() {
        return upgradedNodes.size() == NODE_NUM;
    }

    @Override
    protected String getTestRestCluster() {
        return cluster.getHttpAddresses();
    }

    // The following overrides preserve cluster state across the parameterized runs:
    // indices, data streams, templates, repos etc. must survive from one upgrade stage
    // to the next, so nothing is cleaned up between tests.

    @Override
    protected final boolean resetFeatureStates() {
        return false;
    }

    @Override
    protected final boolean preserveIndicesUponCompletion() {
        return true;
    }

    @Override
    protected final boolean preserveDataStreamsUponCompletion() {
        return true;
    }

    @Override
    protected final boolean preserveReposUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveTemplatesUponCompletion() {
        return true;
    }

    @Override
    protected boolean preserveClusterUponCompletion() {
        return true;
    }

    @Override
    protected final Settings restClientSettings() {
        return Settings.builder()
            .put(super.restClientSettings())
            // increase the timeout here to 90 seconds to handle long waits for a green
            // cluster health. the waits for green need to be longer than a minute to
            // account for delayed shards
            .put(ESRestTestCase.CLIENT_SOCKET_TIMEOUT, "90s")
            .build();
    }

    @Override
    protected final String getEnsureGreenTimeout() {
        // increase the timeout here to 70 seconds to handle long waits for a green
        // cluster health. the waits for green need to be longer than a minute to
        // account for delayed shards
        return "70s";
    }
}
    /**
     * Verifies that peer recovery from a snapshot keeps working across a rolling upgrade.
     *
     * Old cluster: creates a 1-shard/0-replica index, indexes docs, registers an fs repository
     * with snapshot-based peer recovery enabled, snapshots it, then adds a replica.
     * Mixed/upgraded rounds: steers shards away from / back onto upgraded nodes, drops and
     * re-adds the replica (forcing a recovery, expected to come from the snapshot), then
     * asserts all documents are searchable.
     */
    public void testSnapshotBasedRecovery() throws Exception {
        assumeFalse(
            "Cancel shard allocation command is broken for initial desired balance versions and might allocate shard "
                + "on the node where it is not supposed to be. Fixed by https://github.com/elastic/elasticsearch/pull/93635",
            getOldClusterVersion() == Version.V_8_6_0
                || getOldClusterVersion() == Version.V_8_6_1
                || getOldClusterVersion() == Version.V_8_7_0
        );

        final String indexName = "snapshot_based_recovery";
        final String repositoryName = "snapshot_based_recovery_repo";
        final int numDocs = 200;
        if (isOldCluster()) {
            Settings.Builder settings = Settings.builder()
                .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
                .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
            createIndex(indexName, settings.build());
            ensureGreen(indexName);
            indexDocs(indexName, numDocs);
            flush(indexName, true);
            registerRepository(
                repositoryName,
                "fs",
                true,
                Settings.builder()
                    .put("location", "./snapshot_based_recovery")
                    .put(BlobStoreRepository.USE_FOR_PEER_RECOVERY_SETTING.getKey(), true)
                    .build()
            );
            createSnapshot(repositoryName, "snap", true);
            // Adding the replica after the snapshot means its recovery can be snapshot-based.
            updateIndexSettings(indexName, Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1));
            ensureGreen(indexName);
        } else {
            if (isFirstMixedCluster()) {
                List<String> upgradedNodeIds = getUpgradedNodeIds();
                // It's possible that the test simply does a rolling-restart, i.e. it "upgrades" to
                // the same version. In that case we proceed without excluding any node
                if (upgradedNodeIds.isEmpty() == false) {
                    assertThat(upgradedNodeIds.size(), is(equalTo(1)));
                    String upgradedNodeId = upgradedNodeIds.get(0);
                    logger.info("--> excluding [{}] from node [{}]", indexName, upgradedNodeId);
                    updateIndexSettings(indexName, Settings.builder().put("index.routing.allocation.exclude._id", upgradedNodeId));
                    ensureGreen(indexName);
                    logger.info("--> finished excluding [{}] from node [{}]", indexName, upgradedNodeId);
                } else {
                    logger.info("--> no upgrading nodes, not adding any exclusions for [{}]", indexName);
                }

                String primaryNodeId = getPrimaryNodeIdOfShard(indexName, 0);
                Version primaryNodeVersion = getNodeVersion(primaryNodeId);

                // Sometimes the primary shard ends on the upgraded node (i.e. after a rebalance)
                // This causes issues when removing and adding replicas, since then we cannot allocate to any of the old nodes.
                // That is an issue only for the first mixed round.
                // In that case we exclude the upgraded node from the shard allocation and cancel the shard to force moving
                // the primary to a node in the old version, this allows adding replicas in the first mixed round.
                logger.info("--> Primary node in first mixed round {} / {}", primaryNodeId, primaryNodeVersion);
                if (primaryNodeVersion.after(getOldClusterVersion())) {
                    logger.info("--> cancelling primary shard on node [{}]", primaryNodeId);
                    cancelShard(indexName, 0, primaryNodeId);
                    logger.info("--> done cancelling primary shard on node [{}]", primaryNodeId);

                    String currentPrimaryNodeId = getPrimaryNodeIdOfShard(indexName, 0);
                    assertThat(getNodeVersion(currentPrimaryNodeId), is(equalTo(getOldClusterVersion())));
                }
            } else {
                logger.info("--> not in first upgrade round, removing exclusions for [{}]", indexName);
                updateIndexSettings(indexName, Settings.builder().putNull("index.routing.allocation.exclude._id"));
                logger.info("--> done removing exclusions for [{}]", indexName);
            }

            // Drop replicas, then add them back: the re-added replica should recover from the snapshot.
            logger.info("--> dropping replicas from [{}]", indexName);
            updateIndexSettingsPermittingSlowlogDeprecationWarning(
                indexName,
                Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
            );
            logger.info("--> finished dropping replicas from [{}], adding them back", indexName);
            updateIndexSettingsPermittingSlowlogDeprecationWarning(
                indexName,
                Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
            );
            logger.info("--> finished adding replicas from [{}]", indexName);
            ensureGreen(indexName);

            assertMatchAllReturnsAllDocuments(indexName, numDocs);
            assertMatchQueryReturnsAllDocuments(indexName, numDocs);
        }
    }
-8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.index.IndexVersion; @@ -21,13 +23,17 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -public class SystemIndicesUpgradeIT extends AbstractRollingTestCase { +public class SystemIndicesUpgradeIT extends ParameterizedRollingUpgradeTestCase { + + public SystemIndicesUpgradeIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } @SuppressWarnings("unchecked") public void testSystemIndicesUpgrades() throws Exception { final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct " + "access to system indices will be prevented by default"; - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { // create index Request createTestIndex = new Request("PUT", "/test_index_old"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); @@ -99,7 +105,7 @@ public void testSystemIndicesUpgrades() throws Exception { })); assertThat(client().performRequest(putAliasRequest).getStatusLine().getStatusCode(), is(200)); } - } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + } else if (isUpgradedCluster()) { assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); Map indices = new JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))).get( diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java similarity index 90% rename from qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java rename to qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java index 19f24c97a47f8..f8464be894ac9 100644 --- 
a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/TsdbIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import com.carrotsearch.randomizedtesting.annotations.Name; + import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.common.time.DateFormatter; @@ -24,7 +26,11 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class TsdbIT extends AbstractRollingTestCase { +public class TsdbIT extends ParameterizedRollingUpgradeTestCase { + + public TsdbIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } private static final String TEMPLATE = """ { @@ -88,21 +94,21 @@ public class TsdbIT extends AbstractRollingTestCase { private static final String BULK = """ {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507","ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508","ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508", "ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509","ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", 
"uid":"947e4ced-1786-4e53-9e0c-5c447e959509", "ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510","ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510", "ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9","ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea10","ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea10", "ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876e11","ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876e11", "ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}} {"create": {}} - {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4","ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": 
"elephant", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4", "ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} """; private static final String DOC = """ @@ -125,11 +131,11 @@ public class TsdbIT extends AbstractRollingTestCase { public void testTsdbDataStream() throws Exception { assumeTrue( - "Skipping version [" + UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0", - UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) + "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0", + getOldClusterVersion().onOrAfter(Version.V_8_7_0) ); String dataStreamName = "k8s"; - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { final String INDEX_TEMPLATE = """ { "index_patterns": ["$PATTERN"], @@ -144,20 +150,20 @@ public void testTsdbDataStream() throws Exception { assertOK(client().performRequest(putIndexTemplateRequest)); performOldClustertOperations(templateName, dataStreamName); - } else if (CLUSTER_TYPE == ClusterType.MIXED) { + } else if (isMixedCluster()) { performMixedClusterOperations(dataStreamName); - } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + } else if (isUpgradedCluster()) { performUpgradedClusterOperations(dataStreamName); } } public void testTsdbDataStreamWithComponentTemplate() throws Exception { assumeTrue( - "Skipping version [" + UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", - UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) && UPGRADE_FROM_VERSION.before(Version.V_8_11_0) + "Skipping version [" + getOldClusterVersion() + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", + getOldClusterVersion().onOrAfter(Version.V_8_7_0) && getOldClusterVersion().before(Version.V_8_11_0) ); String dataStreamName = "template-with-component-template"; - if (CLUSTER_TYPE == ClusterType.OLD) { + if (isOldCluster()) { final String COMPONENT_TEMPLATE = """ { "template": $TEMPLATE @@ -181,9 +187,9 @@ public void 
testTsdbDataStreamWithComponentTemplate() throws Exception { assertOK(client().performRequest(putIndexTemplateRequest)); performOldClustertOperations(templateName, dataStreamName); - } else if (CLUSTER_TYPE == ClusterType.MIXED) { + } else if (isMixedCluster()) { performMixedClusterOperations(dataStreamName); - } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + } else if (isUpgradedCluster()) { performUpgradedClusterOperations(dataStreamName); var dataStreams = getDataStream(dataStreamName); @@ -242,7 +248,7 @@ private void performUpgradedClusterOperations(String dataStreamName) throws Exce private static void performMixedClusterOperations(String dataStreamName) throws IOException { ensureHealth(dataStreamName, request -> request.addParameter("wait_for_status", "yellow")); - if (FIRST_MIXED_ROUND) { + if (isFirstMixedCluster()) { indexDoc(dataStreamName); } assertSearch(dataStreamName, 9); diff --git a/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java new file mode 100644 index 0000000000000..3c6e15a78fc1f --- /dev/null +++ b/qa/rolling-upgrade/src/javaRestTest/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.upgrades;

import com.carrotsearch.randomizedtesting.annotations.Name;

import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Strings;

import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
import static org.hamcrest.Matchers.is;

/**
 * Rolling-upgrade test for indices created with settings that were deprecated (pre-8.0)
 * and later removed ({@code index.indexing.slowlog.level}). Verifies that such indices
 * survive the upgrade, remain writable in the mixed cluster, and that their obsolete
 * settings can still be read and updated (with the appropriate deprecation warnings)
 * on the fully upgraded cluster.
 */
public class UpgradeWithOldIndexSettingsIT extends ParameterizedRollingUpgradeTestCase {

    public UpgradeWithOldIndexSettingsIT(@Name("upgradeNode") Integer upgradeNode) {
        super(upgradeNode);
    }

    private static final String INDEX_NAME = "test_index_old_settings";
    // Warning emitted by pre-8.0 nodes when the deprecated setting is used.
    private static final String EXPECTED_WARNING = "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will "
        + "be removed in a future release! See the breaking changes documentation for the next major version.";

    // Warning emitted by 8.x nodes for the (now removed) setting on an upgraded index.
    private static final String EXPECTED_V8_WARNING = "[index.indexing.slowlog.level] setting was deprecated in the previous Elasticsearch"
        + " release and is removed in this release.";

    public void testOldIndexSettings() throws Exception {
        if (isOldCluster()) {
            Request createTestIndex = new Request("PUT", "/" + INDEX_NAME);
            createTestIndex.setJsonEntity("{\"settings\": {\"index.indexing.slowlog.level\": \"WARN\"}}");
            createTestIndex.setOptions(expectWarnings(EXPECTED_WARNING));
            if (getOldClusterVersion().before(Version.V_8_0_0)) {
                // create index with settings no longer valid in 8.0
                client().performRequest(createTestIndex);
            } else {
                // on 8.x old clusters the setting is rejected outright; create the index without it
                assertTrue(
                    expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage()
                        .contains("unknown setting [index.indexing.slowlog.level]")
                );

                Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME);
                client().performRequest(createTestIndex1);
            }

            // add some data
            Request bulk = new Request("POST", "/_bulk");
            bulk.addParameter("refresh", "true");
            if (getOldClusterVersion().before(Version.V_8_0_0)) {
                // pre-8.0 nodes warn about the deprecated setting on writes to this index
                bulk.setOptions(expectWarnings(EXPECTED_WARNING));
            }
            bulk.setJsonEntity(Strings.format("""
                {"index": {"_index": "%s"}}
                {"f1": "v1", "f2": "v2"}
                """, INDEX_NAME));
            client().performRequest(bulk);
        } else if (isMixedCluster()) {
            // add some more data
            Request bulk = new Request("POST", "/_bulk");
            bulk.addParameter("refresh", "true");
            if (getOldClusterVersion().before(Version.V_8_0_0)) {
                bulk.setOptions(expectWarnings(EXPECTED_WARNING));
            }
            bulk.setJsonEntity(Strings.format("""
                {"index": {"_index": "%s"}}
                {"f1": "v3", "f2": "v4"}
                """, INDEX_NAME));
            client().performRequest(bulk);
        } else {
            if (getOldClusterVersion().before(Version.V_8_0_0)) {
                Request createTestIndex = new Request("PUT", "/" + INDEX_NAME + "/_settings");
                // update index settings should work
                createTestIndex.setJsonEntity("{\"index.indexing.slowlog.level\": \"INFO\"}");
                createTestIndex.setOptions(expectWarnings(EXPECTED_V8_WARNING));
                client().performRequest(createTestIndex);

                // ensure we were able to change the setting, despite it having no effect
                Request indexSettingsRequest = new Request("GET", "/" + INDEX_NAME + "/_settings");
                Map<String, Object> response = entityAsMap(client().performRequest(indexSettingsRequest));

                var slowLogLevel = (String) (XContentMapValues.extractValue(
                    INDEX_NAME + ".settings.index.indexing.slowlog.level",
                    response
                ));

                // check that we can read our old index settings
                assertThat(slowLogLevel, is("INFO"));
            }
            // one doc indexed on the old cluster plus one per mixed round
            assertCount(INDEX_NAME, 2);
        }
    }

    /** Asserts that {@code index} contains at least {@code countAtLeast} documents. */
    private void assertCount(String index, int countAtLeast) throws IOException {
        Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
        searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
        searchTestIndexRequest.addParameter("filter_path", "hits.total");
        Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest);
        Map<String, Object> response = entityAsMap(searchTestIndexResponse);

        var hitsTotal = (Integer) (XContentMapValues.extractValue("hits.total", response));

        assertTrue(hitsTotal >= countAtLeast);
    }

    /**
     * Updates index settings, tolerating the slowlog deprecation warning that leaks from
     * unrelated responses on versions before 7.17.9.
     */
    public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException {
        Request request = new Request("PUT", "/" + index + "/_settings");
        request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build()));
        if (getOldClusterVersion().before(Version.V_7_17_9)) {
            // There is a bug (fixed in 7.17.9 and 8.7.0 where deprecation warnings could leak into ClusterApplierService#applyChanges)
            // Below warnings are set (and leaking) from an index in this test case
            request.setOptions(expectVersionSpecificWarnings(v -> {
                v.compatible(
                    "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will be removed in a future release! "
                        + "See the breaking changes documentation for the next major version."
                );
            }));
        }
        client().performRequest(request);
    }
}
*/ -public class XPackIT extends AbstractRollingTestCase { +public class XPackIT extends ParameterizedRollingUpgradeTestCase { + + public XPackIT(@Name("upgradeNode") Integer upgradeNode) { + super(upgradeNode); + } + @Before public void skipIfNotXPack() { assumeThat( @@ -28,10 +35,9 @@ public void skipIfNotXPack() { System.getProperty("tests.distribution"), equalTo("default") ); - assumeThat( + assumeTrue( "running this on the unupgraded cluster would change its state and it wouldn't work prior to 6.3 anyway", - CLUSTER_TYPE, - equalTo(ClusterType.UPGRADED) + isUpgradedCluster() ); /* * *Mostly* we want this for when we're upgrading from pre-6.3's diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java deleted file mode 100644 index 35688e7c244cf..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeWithOldIndexSettingsIT.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.upgrades; - -import org.elasticsearch.Version; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.core.Strings; - -import java.io.IOException; -import java.util.Map; - -import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.is; - -public class UpgradeWithOldIndexSettingsIT extends AbstractRollingTestCase { - - private static final String INDEX_NAME = "test_index_old_settings"; - private static final String EXPECTED_WARNING = "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will " - + "be removed in a future release! See the breaking changes documentation for the next major version."; - - private static final String EXPECTED_V8_WARNING = "[index.indexing.slowlog.level] setting was deprecated in the previous Elasticsearch" - + " release and is removed in this release."; - - @SuppressWarnings("unchecked") - public void testOldIndexSettings() throws Exception { - switch (CLUSTER_TYPE) { - case OLD -> { - Request createTestIndex = new Request("PUT", "/" + INDEX_NAME); - createTestIndex.setJsonEntity("{\"settings\": {\"index.indexing.slowlog.level\": \"WARN\"}}"); - createTestIndex.setOptions(expectWarnings(EXPECTED_WARNING)); - if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { - // create index with settings no longer valid in 8.0 - client().performRequest(createTestIndex); - } else { - assertTrue( - expectThrows(ResponseException.class, () -> client().performRequest(createTestIndex)).getMessage() - .contains("unknown setting [index.indexing.slowlog.level]") - ); - - Request createTestIndex1 = new Request("PUT", "/" + INDEX_NAME); - client().performRequest(createTestIndex1); - } - - // add some data - 
Request bulk = new Request("POST", "/_bulk"); - bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { - bulk.setOptions(expectWarnings(EXPECTED_WARNING)); - } - bulk.setJsonEntity(Strings.format(""" - {"index": {"_index": "%s"}} - {"f1": "v1", "f2": "v2"} - """, INDEX_NAME)); - client().performRequest(bulk); - } - case MIXED -> { - // add some more data - Request bulk = new Request("POST", "/_bulk"); - bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { - bulk.setOptions(expectWarnings(EXPECTED_WARNING)); - } - bulk.setJsonEntity(Strings.format(""" - {"index": {"_index": "%s"}} - {"f1": "v3", "f2": "v4"} - """, INDEX_NAME)); - client().performRequest(bulk); - } - case UPGRADED -> { - if (UPGRADE_FROM_VERSION.before(Version.V_8_0_0)) { - Request createTestIndex = new Request("PUT", "/" + INDEX_NAME + "/_settings"); - // update index settings should work - createTestIndex.setJsonEntity("{\"index.indexing.slowlog.level\": \"INFO\"}"); - createTestIndex.setOptions(expectWarnings(EXPECTED_V8_WARNING)); - client().performRequest(createTestIndex); - - // ensure we were able to change the setting, despite it having no effect - Request indexSettingsRequest = new Request("GET", "/" + INDEX_NAME + "/_settings"); - Map response = entityAsMap(client().performRequest(indexSettingsRequest)); - - var slowLogLevel = (String) (XContentMapValues.extractValue( - INDEX_NAME + ".settings.index.indexing.slowlog.level", - response - )); - - // check that we can read our old index settings - assertThat(slowLogLevel, is("INFO")); - } - assertCount(INDEX_NAME, 2); - } - } - } - - private void assertCount(String index, int countAtLeast) throws IOException { - Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); - searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - searchTestIndexRequest.addParameter("filter_path", "hits.total"); - Response searchTestIndexResponse = 
client().performRequest(searchTestIndexRequest); - Map response = entityAsMap(searchTestIndexResponse); - - var hitsTotal = (Integer) (XContentMapValues.extractValue("hits.total", response)); - - assertTrue(hitsTotal >= countAtLeast); - } - - public static void updateIndexSettingsPermittingSlowlogDeprecationWarning(String index, Settings.Builder settings) throws IOException { - Request request = new Request("PUT", "/" + index + "/_settings"); - request.setJsonEntity(org.elasticsearch.common.Strings.toString(settings.build())); - if (UPGRADE_FROM_VERSION.before(Version.V_7_17_9)) { - // There is a bug (fixed in 7.17.9 and 8.7.0 where deprecation warnings could leak into ClusterApplierService#applyChanges) - // Below warnings are set (and leaking) from an index in this test case - request.setOptions(expectVersionSpecificWarnings(v -> { - v.compatible( - "[index.indexing.slowlog.level] setting was deprecated in Elasticsearch and will be removed in a future release! " - + "See the breaking changes documentation for the next major version." 
- ); - })); - } - client().performRequest(request); - } -} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java index d41eeab7aef26..cbab099f9cba8 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterInfoRestCancellationIT.java @@ -26,8 +26,8 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.AcceptChannelHandler; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml index 767e898792f20..030ce2e2332b1 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -30,7 +30,7 @@ setup: refresh: true body: my_field: [ - 
-457.1953,259.6788,271.9127,-26.8833,403.0915,-56.9197,-445.8869,-108.8167,417.8988,13.4232,-281.765,-405.8573,262.7831,-279.493,328.5591,-453.3941,-116.0368,435.4734,-439.0927,-332.9565,355.4955,324.9878,33.3519,-165.0182,188.1811,467.3455,185.1057,-233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1
252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036
,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-376.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293
,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.3582,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.43
02,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.4191,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.772
1,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.7866,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409,345.7668,377.965,125.6188,-462.0904,-235.33
24,316.8892,-460.7371,248.9306,418.7082,-333.7257,-104.5062,-408.1356,148.6624,-158.4929,-477.0664,80.4926,-214.6292,211.3377,322.7854,-312.851,403.0215,-213.3089,-71.3355,-276.1068,-293.0902,-277.4559,54.2176,-119.1285,-479.4361,-492.6072,8.3732,42.4988,-5.576,-198.6151,-357.0952,-331.5667,186.6195,317.3075,201.267,-37.1731,-278.3164,-467.7796,-163.3909,-117.305,-233.9266,277.7969,181.9723,178.8292,-168.7152,-436.041,171.345,369.0302,423.7144,434.0961,-428.1816,23.7334,-136.6735,-222.4486,180.8461,57.5968,129.2984,127.1866,-109.3928,-143.6253,-385.9948,127.9867,-8.8096,-239.844,66.6491,-50.7301,-309.1113,-474.6991,212.1767,-444.4596,-211.3601,351.3551,335.0507,-128.6226,-98.5249,-257.454,489.8014,-378.8622,311.0304,-4.9107,362.7586,-458.8825,373.2779,-103.29,-5.6216,122.0183,76.9731,17.8771,289.8893,-56.4338,375.9665,-83.9991,440.0823,142.2309,-471.0813,-59.4847,-400.4217,91.4892,374.4009,486.8697,414.5213,-0.3535,-278.2345,-231.206,-238.479,389.3143,-276.9742,-33.9869,349.1201,127.3928,-410.7213,337.3789,36.4048,333.4291,-12.4075,483.8778,311.4489,-74.0628,-379.6051,463.234,157.5614,-140.9455,120.7926,-161.2341,194.162,-412.6181,-9.1258,-194.5065,441.1572,255.5455,-73.8086,-119.4013,-486.4792,-27.4352,98.9738,-119.002,-75.5589,261.7675,156.0993,89.6457,-190.6318,429.9325,195.9536,-172.6155,-22.7976,438.9412,-246.4661,447.7281,434.5346,405.8957,217.3324,392.6129,-158.604,15.8632,483.0414,334.7693,-307.2482,302.1267,-7.4125,3.8081,-405.7316,377.5069,51.2307,235.0695,269.737,-389.3487,186.4225,-36.8521,401.2051,-59.0378,-190.8023,-182.8076,-362.6136,-124.8064,362.4142,45.3344,-330.1214,-162.5452,-434.4411,219.1143,-374.1038,364.5639,-268.582,-22.9247,-73.8849,-54.5258,-23.0882,167.9233,-181.9807,-207.1173,300.2193,206.5903,-72.013,-244.4396,-435.5389,10.3523,-435.3545,-138.8392,449.8426,-244.8971,229.7666,267.5225,-401.6021,466.3278,418.3623,-317.8205,28.5192,384.5628,-79.6177,469.4532,-395.1986,-353.4477,-93.6914,70.3999,-441.0627,-201.1221,141.2748,433.3389,82.413,
-394.0046,-438.6836,453.4704,-160.6535,353.0374,-238.0377,236.5195,497.9019,202.9472,-421.6417,-382.042,84.6308,430.1599,-390.9918,-195.0401,255.6526,-86.5964,-491.667,-199.1557,-102.7114,474.877,-292.9154,-77.3163,143.5625,58.8126,-284.8908,-457.6457,212.5317,480.4032,-324.0829,491.0165,-494.7934,267.4311,-142.2401,-368.9058,-370.4955,498.803,-6.7377,-395.373,177.8868,306.9761,80.4185,-239.1253,-435.1349,7.6298,-157.6242,348.6095,475.7845,317.7116,-353.7336,-40.2881,353.7096,-60.9783,-385.5816,243.8071,-398.8341,62.343,340.0251,-24.8105,-343.4186,189.6737,-467.3026,104.7127,159.5467,-482.5496,71.6951,-163.5304,-321.8438,185.2875,-331.6885,-102.6817,-242.7548,-259.4407,220.6898,231.6571,-297.1145,-186.9472,-316.9286,-36.2392,-293.964,296.3878,467.7409,-277.6389,493.2143,417.1244,12.241,-343.7893,-33.7207,457.2978,-248.9726,-409.5439,-92.4779,-173.7584,400.8483,59.7439,13.3265,-175.617,37.333,-307.6469,-82.3687,332.578,-412.0079,144.7037,350.6506,423.3235,-53.2147,67.9581,-447.3845,-461.0187,371.1702,386.2045,352.2722,-119.098,123.9178,-52.0535,465.2626,474.0272,402.9961,491.4763,-33.1373,-228.8607,-383.3299,408.8192,-275.155,489.8633,-349.5073,346.9781,129.3929,282.1868,-77.3384,277.3026,412.3277,263.6705,473.3756,-437.9988,114.1686,-452.3331,-167.8898,-193.6217,444.6168,-354.3223,-238.0967,432.0883,-349.7249,-42.3659,-304.7343,296.2192,-136.5386,-121.7774,450.4678,140.5384,-450.8993,93.8942,-54.4945,498.521,-461.7182,111.5166,-397.6007,-397.959,-20.9331,-19.7068,78.551,161.9472,-24.8682,-434.4537,102.9447,214.298,-494.3813,211.6782,64.8196,372.6962,-399.8337,114.5476,-191.0045,-369.6465,-391.7201,-204.9951,-201.7654,475.898,-262.3247,-348.6974,79.4062,-112.4281,-102.266,67.3008,335.485,68.4289,-433.9104,-392.963,-73.3788,276.5766,-105.2219,422.6201,192.915,-388.3541,242.3915,479.5633,42.5998,259.6189,-316.5861,390.1121,-216.0274,-373.296,103.7169,321.9107,19.0023,487.2627,151.6922,276.7424,461.6928,24.4758,133.263,-47.289,-413.9538,435.2414,-466.9724,-270.6602,238.
9442,-110.5389,403.5151,-395.4393,-208.2219,-53.0773,-26.5792,-387.6534,-120.5566,143.2237,-305.3778,442.0665,417.9523,460.3337,254.8689,-375.9436,-101.0153,232.4727,-35.5285,-470.3007,-423.9161,-108.9997,-29.6555,233.1043,240.4766,404.763,276.8465,-354.4058,74.0678,-343.244,332.9786,361.2964,-322.0828,-41.1861,-122.8074,-299.5682,-481.218,-157.3994,310.6317,-261.176,310.2644,-239.9855,255.1004,-311.3351,437.9486,78.1311,-133.9261,-176.2119,45.9943,492.3169,266.5795,16.8553,-470.9413,-331.2718,218.4122,369.7118,-179.3201,-165.7277,-87.9832,357.6499,-261.0345,442.1609,113.2997,-112.5643,481.2426,-365.4958,400.5374,-395.085,303.8103,-292.0268,167.0744,-199.013,174.9283,498.3585,-337.466,303.9078,-326.0901,-331.7143,6.7189,-277.1371,-204.9097,-313.4259,-462.7296,437.8485,267.2872,157.752,143.8784,60.1304,-492.991,326.0132,-123.3415,390.8461,-293.0175,483.4759,240.4338,271.6879,483.4801,391.2687,238.3995,-246.607,-411.7722,-257.9864,238.0949,494.3455,-489.0838,-26.7283,317.1161,-264.0242,-16.6819,-141.4839,429.101,252.2336,-325.1541,471.044,452.352,7.4546,343.3004,-336.4424,489.6317,307.1831,-139.2075,153.572,-332.5617,-361.892,110.6459,-384.8117,-423.0834,-277.9929,44.5303,167.9458,364.1204,-222.5008,-148.7923,198.4694,-74.0043,-458.4327,-227.5346,272.4441,-477.2587,303.1998,72.3129,112.9422,-98.2577,296.903,-489.0569,-461.4503,-381.6239,-440.6212,-354.1834,356.1583,-220.6533,192.5295,-409.0818,-264.2973,498.2192,-306.675,-313.6103,-124.9266,-436.5922,297.9051,121.9351,425.3888,-283.9925,-360.441,-347.4517,8.6814,477.4163,-344.6926,-311.574,-199.9541,-272.862,-360.8642,-306.0856,-218.9529,200.1938,-187.9337,-149.341,-431.5156,-135.3958,131.1299,262.0532,-210.162,353.4392,-249.2969,216.4223,499.6139,215.8176,-346.1569,177.2202,-173.1132,-466.9007,-310.9848,463.485,6.516,-334.8823,-282.7409,-375.2367,-127.4937,257.2427,384.9285,206.4053,-283.9167,369.6312,-325.1146,452.7523,-103.9792,-51.036,153.325,-344.1749,289.4824,109.8308,375.2284,-249.8481,367.8478,71.0143,471.6136
,-265.6336,12.9061,-470.1288,-113.547,38.8925,-205.7232,418.6063,475.6095,-18.8731,-431.5545,-288.6452,-406.8928,79.4828,-152.1474,345.565,-200.8038,174.7789,379.2991,-385.1188,-217.6888,241.9077,-449.1824,467.832,186.0095,-82.8376,-450.7827,-32.2903,-288.132,169.8581,-275.3198,-388.1222,-431.3601,64.9652,368.9351,107.4999,408.8666,267.7858,-462.4349,-198.4615,378.1182,252.7529,-344.883,-364.0161,-124.6144,-222.8902,-103.7114,387.1701,-363.7944,-237.934,230.2082,-63.1276,-456.8188,361.9248,461.0643,160.8127,305.6079,81.2236,-322.0002,-273.4727,-356.9758,227.4751,278.5386,-10.8627,49.6988,-495.2527,428.0901,393.6169,-360.5547,-137.0244,26.962,-326.3379,-399.4972,449.7645,-238.7444,-69.8461,222.6126,-68.7657,132.7567,255.7355,-190.3762,271.6129,405.5764,115.8834,0.9645,331.1665,396.4585,217.4435,-323.6914,39.5915,282.4489,411.3888,-219.2131,240.8913,-109.5264,-438.3067,-157.3961,-180.7485,-258.9153,61.7008,483.4718,-386.0406,-499.1824,-90.2675,-358.5152,-79.3051,-97.4094,-91.7246,63.539,-307.0526,226.416,-454.475,-375.7449,300.532,409.7526,7.7042,-320.297,-244.9896,-282.6645,-414.9866,-331.4623,316.162,348.8361,-342.8609,477.2374,6.5636,-483.931,341.3556,498.2318,-46.3428,203.981,101.2793,128.4547,-285.068,56.5149,-407.6478,-151.4672,116.6673,-115.0498,-491.7974,-151.9475,474.7827,-288.4179,286.4447,-430.6331,-279.1458,318.721,-276.8375,157.9586,-9.2346,398.8374,380.2256,61.1557,13.0746,-80.139,-134.8798,-37.6466,-209.7381,236.1511,388.5629,-196.1123,-481.5887,327.8334,408.2074,479.1439,85.082,227.7623,250.2644,-47.8238,464.8471,-431.5099,489.9794,452.9999,-50.8695,-429.0862,-138.8555,-395.3346,391.3405,-249.4682,-280.6761,-460.5297,1.0129,199.1008,-97.4134,-235.0172,-466.1287,-302.7993,298.4108,-22.478,173.9936,122.8033,-235.0353,231.5057,-97.2265,-203.8224,457.6806,484.1385,-309.3619,-168.3588,-177.2797,-3.9408,-279.2997,104.4862,-139.4921,-450.2539,402.541,-437.1151,-337.4914,-200.3446,-164.484,-293.7216,471.7414,192.6153,233.1926,-122.8377,356.5476,450.1361,-400.0
941,61.0466,441.7145,189.7192,-69.6348,252.5418,-246.5242,-344.0219,14.2904,87.2185,-119.2684,205.422,-374.4802,33.4042,81.2271,-2.5025,-138.6816,8.1989,-439.7698,-446.1887,-374.9012,160.9795,49.3705,72.7925,245.9454,-138.7558,11.9923,414.9421,5.9535,-142.9589,396.2571,-222.2068,-2.6172,-90.5871,346.7415,-337.3213,-372.4473,91.8271,310.6442,263.7468,-357.0433,-246.0827,25.4967,55.8069,-64.7183,-342.7375,-356.7083,70.0885,-79.026,-346.3906,206.2687,-440.6602,321.8775,223.3025,159.6939,292.4308,241.077,-219.0901,495.9946,0.3506,-166.4262,475.1836,-272.5527,118.8711,458.2456,353.3839,-82.5653,37.2834,-92.4387,146.5082,233.4743,-408.0537,-469.9263,148.8959,-324.352,498.608,-324.5319,-114.6779,-200.4192,404.8448,-289.7989,400.6151,-372.9065,359.7581,141.4237,-304.6837,314.3738,-302.4693,442.6138,-224.0818,270.1887,-477.1098,429.0239,264.1871,26.84,283.4518,129.5215,6.6673,-91.4464,75.821,261.5692,-403.0782,-213.9284,-356.8221,-232.4484,33.5696,99.1931,344.0097,187.4695,-264.0572,-199.6103,342.5485,187.058,31.5948,-275.4046,215.9846,425.1114,327.1992,437.8426,-281.2049,71.7953,393.346,-339.9023,-78.8502,314.1866,-120.7207,-416.0802,-327.1001,413.6143,-236.2051,247.1197,318.5011,-194.295,486.3421,409.0831,252.6212,-452.654,-215.7497,-464.1643,61.9033,66.4139,-425.8918,-401.3522,-395.1639,427.7052,-264.1728,131.9144,258.4416,-442.2357,68.3167,441.5518,138.4774,470.7538,-14.6434,-436.2225,385.0708,286.1155,323.9014,137.4596,-352.5503,1.9307,-314.7656,449.5639,-468.3008,81.2499,487.4562,270.1387,-445.3627,460.1174,-205.2539,-32.6044,359.0438,-115.5841,-268.6624,-495.8554,-474.4781,337.9834,-281.4488,252.1636,-33.645,-26.6636,193.8834,287.2377,6.9748,414.4343,-211.7143,-23.0035,-226.5275,-400.285,-336.3935,28.1908,244.27,21.9938,-222.3759,-103.1418,464.7943,-256.0156,46.7511,-487.2509,-321.3631,479.2142,328.166,-481.2039,253.4962,100.2875,-399.98,-81.5868,289.7597,-318.7266,-264.2078,129.4063,407.6828,222.8346,370.0391,46.9838,-356.4992,-305.9992,-258.4048,-410.7736,-245.9092,
32.9185,-237.9085,-403.8853,12.0239,-164.6252,107.369,8.0379,-139.3796,365.9266,-448.5863,314.1141,-280.0686,-463.4747,2.6092,-376.8811,96.7462,242.419,-480.9968,345.3697,328.281,39.0387,-342.3026,469.0461,-103.9411,381.0458,-141.6771,-4.7988,289.4799,-55.0671,-292.4788,364.1267,-395.9876,-232.5859,-285.7012,-444.7762,79.5454,251.5539,359.3705,467.2154,273.1778,-373.8216,299.611,-464.32,-106.0638,491.2626,-39.3721,-110.1154,383.4063,45.0848,262.2361,-111.754,249.0826,-305.9751,22.9663,-120.4794,484.0797,151.9063,388.5088,105.9067,444.0361,-45.5696,243.9313,303.4003,-27.795,-7.2151,411.6561,-100.6193,-207.3277,-6.4576,-300.3722,118.2638,342.3654,66.7861,104.0615,180.5752,281.6788,-342.7549,-65.8778,140.9091,-169.8935,-437.2435,-392.4147,-348.2217,202.3684,440.4071,-276.2247,129.5096,-43.4059,-456.876,-445.1126,-193.8847,-156.3408,274.7116,-129.6168,-484.7027,214.0806,375.6649,444.5303,-71.8577,-474.5957,-342.2716,-322.7281,205.6087,-14.3469,-283.0586,-86.2198,-420.3924,182.3599,22.7485,452.8141,-286.5839,155.1115,-316.4854,-28.3824,56.4873,-146.001,378.2396,473.2566,380.2417,-399.6208,-347.9016,206.5985,-145.9688,-219.9708,-216.6865,404.4334,324.8516,55.3154,-119.4645,-79.2847,-191.5158,-136.3728,413.3355,356.7344,-437.7335,404.9099,-494.6143,135.9107,151.2158,-161.0672,451.0975,-93.0876,495.7659,321.2577,-451.6211,-311.9214,-432.4626,496.8637,382.6126,97.7431,245.2208,-462.5156,-274.939,116.6882,80.6219,315.5602,-342.4345,274.387,-418.7591,53.5711,-96.2339,271.8546,-46.8098,150.3864,206.6682,311.9593,174.7625,-198.5948,105.6143,212.7571,237.4211,-21.2842,-383.0439,285.4973,-80.4955,105.5129,-158.8626,-156.2353,98.5192,-308.2654,-92.7883,45.686,-380.6921,140.1508,365.9526,108.1565,-140.4508,-246.5095,133.3693,-4.6582,-20.843,339.374,-99.2908,17.8824,242.8291,75.8953,-441.8762,-352.3943,-484.0549,-401.3674,321.6953,213.7102,261.1824,-41.5899,65.2736,-26.9977,152.9615,308.5357,-211.4979,477.2073,-414.7828,-330.2034,-123.7898,-261.1105,-328.6632,-15.1514,438.4531,-323.3
771,-173.6672,-293.5578,459.1075,-18.34,-270.1311,-315.6445,348.4226,-435.2806,-419.9553,-106.1863,-283.0003,43.5508,-18.0891,224.808,406.4155,-163.6988,-129.2904,207.8322,474.5666,-60.1079,9.563,44.705,118.7999,-301.6795,-38.2161,410.4003,-190.4926,-430.6086,1.2693,312.7535,-455.5725,-271.7346,-159.4378,-227.9918,312.9331,166.2825,-31.7905,-227.9038,-421.644,296.5264,-335.4129,413.344,48.8782,217.3682,434.8719,-387.0484,170.5191,201.0157,127.1522,474.5561,-100.6847,-434.2549,29.5853,-467.6037,184.2936,116.9028,124.6507,-497.3002,-86.4991,59.6243,-104.9888,-294.6228,223.8354,-97.9298,64.2283,203.7397,186.3586,64.5045,122.1795,439.3753,464.9225,434.9882,85.5836,259.4985,70.5414,-117.1196,198.2037,-127.745,-200.2022,-386.0653,1.6688,272.3237,211.4442,445.0575,479.2069,-354.0842,-211.1788,160.3409,258.6131,-71.1154,-196.203,-95.1323,-398.3867,70.6868,15.5394,333.5079,187.8193,-393.7479,269.1152,-336.0885,339.4546,-147.6351,186.847,-126.4872,-108.1731,-70.3962,-389.0454,135.3408,-51.5671,4.6139,-3.1587,-274.941,-208.586,171.0845,-277.1015,-104.1653,-260.934,-310.5456,290.0738,-38.1867,-254.3353,31.6405,433.6526,86.9343,48.5563,137.4622,-34.6388,-1.5028,-452.3147,349.1007,-347.9019,70.4255,-201.5194,-430.2517,177.8199,-391.6226,20.1876,-287.8148,-190.1158,-356.0897,-319.7011,87.2696,-141.1962,-137.9268,-70.4841,95.4435,16.2261,191.5316,-214.8942,142.0224,209.0575,180.5105,26.1511,-497.0902,-186.2708,441.5505,-7.6379,23.9577,-401.2169,-339.3474,16.9572,269.8157,178.6692,299.5455,-367.3993,-413.7073,-96.9188,-472.0939,-327.975,129.6294,446.5669,-32.714,-120.6079,71.7334,190.4871,436.6714,110.0289,-108.4299,8.0033,-341.055,77.7304,-196.1335,-343.1391,-152.6897,-378.0097,-106.9584,395.4607,-98.6717,-131.0531,-140.8907,-185.3101,-68.8474,-478.2088,-18.3317,256.0313,-119.4212,334.7436,318.1335,-20.8287,-147.7622,118.1926,-218.2094,-478.7367,217.0914,219.1878,75.2151,231.5097,-410.8572,-46.2061,153.4654,264.0178,144.8928,-115.1857,-369.8591,126.6643,-122.1998,480.7727,-85.4362,
134.3245,-34.403,124.6945,12.1795,-184.8116,390.6826,87.9712,367.0822,-233.2724,-245.9838,104.6339,-53.7753,-264.3381,50.9031,-122.0604,136.6276,465.3429,288.8934,5.7445,-325.7759,53.493,-441.8264,-271.3847,-371.3886,-272.7637,-102.4757,-358.4499,-143.2793,-64.6363,499.8284,-155.8017,-37.8801,63.5318,-377.6101,125.3457,57.231,49.3608,-245.5766,-47.9802,383.4127,-114.1047,-30.258,-479.6988,-194.4846,368.4079,466.1545,-26.7084,8.2433,74.9479,-155.4871,494.9634,-196.3082,-206.8022,423.2288,-494.5835,-291.7666,-204.8478,396.6,-418.9048,-130.0584,-137.5258,-440.7922,73.1423,-251.5694,356.1615,-34.088,-23.3318,43.2522,-297.3896,409.686,-305.5675,424.8321,-154.9096,181.7696,-87.5939,-151.7475,-319.3074,227.2369,-113.0086,-68.1299,368.0398,-20.3706,-296.0095,-269.9336,-250.5127,-56.5895,188.9818,82.7481,488.6398,-151.2088,11.8563,320.4209,316.3155,317.2716,-185.4569,128.2219,108.4381,-453.2648,-406.1359,-414.2863,36.6919,-160.1338,188.7767,364.4688,-13.3882,233.621,11.2764,-154.8894,424.1841,-128.4954,23.1408,183.1928,382.2918,-464.2506,234.1366,-447.21,-425.1161,66.1712,424.058,299.3596,372.7703,-162.3764,-37.8575,-468.5142,189.9036,172.0345,310.1368,-459.7659,-219.5317,-68.9306,211.4315,-408.8232,215.1716,-134.0617,367.326,385.2393,453.6431,-258.6041,194.9712,-266.8576,145.4018,-406.4884,119.3747,466.6835,-404.694,-480.8574,-3.1007,-48.0469,-70.915,-229.4956,-69.6999,-114.9404,372.8744,-247.5689,250.4333,252.9375,71.5672,323.3984,268.7582,16.7518,-258.5373,252.518,378.1721,-197.3271,-211.1179,444.2923,-152.2646,262.3183,159.3338 + 
-457.1953,259.6788,271.9127,-26.8833,403.0915,-56.9197,-445.8869,-108.8167,417.8988,13.4232,-281.765,-405.8573,262.7831,-279.493,328.5591,-453.3941,-116.0368,435.4734,-439.0927,-332.9565,355.4955,324.9878,33.3519,-165.0182,188.1811,467.3455,185.1057,-233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1
252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036
,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-376.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293
,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.3582,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.43
02,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.4191,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.772
1,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.7866,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409,345.7668,377.965,125.6188,-462.0904,-235.33
24,316.8892,-460.7371,248.9306,418.7082,-333.7257,-104.5062,-408.1356,148.6624,-158.4929,-477.0664,80.4926,-214.6292,211.3377,322.7854,-312.851,403.0215,-213.3089,-71.3355,-276.1068,-293.0902,-277.4559,54.2176,-119.1285,-479.4361,-492.6072,8.3732,42.4988,-5.576,-198.6151,-357.0952,-331.5667,186.6195,317.3075,201.267,-37.1731,-278.3164,-467.7796,-163.3909,-117.305,-233.9266,277.7969,181.9723,178.8292,-168.7152,-436.041,171.345,369.0302,423.7144,434.0961,-428.1816,23.7334,-136.6735,-222.4486,180.8461,57.5968,129.2984,127.1866,-109.3928,-143.6253,-385.9948,127.9867,-8.8096,-239.844,66.6491,-50.7301,-309.1113,-474.6991,212.1767,-444.4596,-211.3601,351.3551,335.0507,-128.6226,-98.5249,-257.454,489.8014,-378.8622,311.0304,-4.9107,362.7586,-458.8825,373.2779,-103.29,-5.6216,122.0183,76.9731,17.8771,289.8893,-56.4338,375.9665,-83.9991,440.0823,142.2309,-471.0813,-59.4847,-400.4217,91.4892,374.4009,486.8697,414.5213,-0.3535,-278.2345,-231.206,-238.479,389.3143,-276.9742,-33.9869,349.1201,127.3928,-410.7213,337.3789,36.4048,333.4291,-12.4075,483.8778,311.4489,-74.0628,-379.6051,463.234,157.5614,-140.9455,120.7926,-161.2341,194.162,-412.6181,-9.1258,-194.5065,441.1572,255.5455,-73.8086,-119.4013,-486.4792,-27.4352,98.9738,-119.002,-75.5589,261.7675,156.0993,89.6457,-190.6318,429.9325,195.9536,-172.6155,-22.7976,438.9412,-246.4661,447.7281,434.5346,405.8957,217.3324,392.6129,-158.604,15.8632,483.0414,334.7693,-307.2482,302.1267,-7.4125,3.8081,-405.7316,377.5069,51.2307,235.0695,269.737,-389.3487,186.4225,-36.8521,401.2051,-59.0378,-190.8023,-182.8076,-362.6136,-124.8064,362.4142,45.3344,-330.1214,-162.5452,-434.4411,219.1143,-374.1038,364.5639,-268.582,-22.9247,-73.8849,-54.5258,-23.0882,167.9233,-181.9807,-207.1173,300.2193,206.5903,-72.013,-244.4396,-435.5389,10.3523,-435.3545,-138.8392,449.8426,-244.8971,229.7666,267.5225,-401.6021,466.3278,418.3623,-317.8205,28.5192,384.5628,-79.6177,469.4532,-395.1986,-353.4477,-93.6914,70.3999,-441.0627,-201.1221,141.2748,433.3389,82.413,
-394.0046,-438.6836,453.4704,-160.6535,353.0374,-238.0377,236.5195,497.9019,202.9472,-421.6417,-382.042,84.6308,430.1599,-390.9918,-195.0401,255.6526,-86.5964,-491.667,-199.1557,-102.7114,474.877,-292.9154,-77.3163,143.5625,58.8126,-284.8908,-457.6457,212.5317,480.4032,-324.0829,491.0165,-494.7934,267.4311,-142.2401,-368.9058,-370.4955,498.803,-6.7377,-395.373,177.8868,306.9761,80.4185,-239.1253,-435.1349,7.6298,-157.6242,348.6095,475.7845,317.7116,-353.7336,-40.2881,353.7096,-60.9783,-385.5816,243.8071,-398.8341,62.343,340.0251,-24.8105,-343.4186,189.6737,-467.3026,104.7127,159.5467,-482.5496,71.6951,-163.5304,-321.8438,185.2875,-331.6885,-102.6817,-242.7548,-259.4407,220.6898,231.6571,-297.1145,-186.9472,-316.9286,-36.2392,-293.964,296.3878,467.7409,-277.6389,493.2143,417.1244,12.241,-343.7893,-33.7207,457.2978,-248.9726,-409.5439,-92.4779,-173.7584,400.8483,59.7439,13.3265,-175.617,37.333,-307.6469,-82.3687,332.578,-412.0079,144.7037,350.6506,423.3235,-53.2147,67.9581,-447.3845,-461.0187,371.1702,386.2045,352.2722,-119.098,123.9178,-52.0535,465.2626,474.0272,402.9961,491.4763,-33.1373,-228.8607,-383.3299,408.8192,-275.155,489.8633,-349.5073,346.9781,129.3929,282.1868,-77.3384,277.3026,412.3277,263.6705,473.3756,-437.9988,114.1686,-452.3331,-167.8898,-193.6217,444.6168,-354.3223,-238.0967,432.0883,-349.7249,-42.3659,-304.7343,296.2192,-136.5386,-121.7774,450.4678,140.5384,-450.8993,93.8942,-54.4945,498.521,-461.7182,111.5166,-397.6007,-397.959,-20.9331,-19.7068,78.551,161.9472,-24.8682,-434.4537,102.9447,214.298,-494.3813,211.6782,64.8196,372.6962,-399.8337,114.5476,-191.0045,-369.6465,-391.7201,-204.9951,-201.7654,475.898,-262.3247,-348.6974,79.4062,-112.4281,-102.266,67.3008,335.485,68.4289,-433.9104,-392.963,-73.3788,276.5766,-105.2219,422.6201,192.915,-388.3541,242.3915,479.5633,42.5998,259.6189,-316.5861,390.1121,-216.0274,-373.296,103.7169,321.9107,19.0023,487.2627,151.6922,276.7424,461.6928,24.4758,133.263,-47.289,-413.9538,435.2414,-466.9724,-270.6602,238.
9442,-110.5389,403.5151,-395.4393,-208.2219,-53.0773,-26.5792,-387.6534,-120.5566,143.2237,-305.3778,442.0665,417.9523,460.3337,254.8689,-375.9436,-101.0153,232.4727,-35.5285,-470.3007,-423.9161,-108.9997,-29.6555,233.1043,240.4766,404.763,276.8465,-354.4058,74.0678,-343.244,332.9786,361.2964,-322.0828,-41.1861,-122.8074,-299.5682,-481.218,-157.3994,310.6317,-261.176,310.2644,-239.9855,255.1004,-311.3351,437.9486,78.1311,-133.9261,-176.2119,45.9943,492.3169,266.5795,16.8553,-470.9413,-331.2718,218.4122,369.7118,-179.3201,-165.7277,-87.9832,357.6499,-261.0345,442.1609,113.2997,-112.5643,481.2426,-365.4958,400.5374,-395.085,303.8103,-292.0268,167.0744,-199.013,174.9283,498.3585,-337.466,303.9078,-326.0901,-331.7143,6.7189,-277.1371,-204.9097,-313.4259,-462.7296,437.8485,267.2872,157.752,143.8784,60.1304,-492.991,326.0132,-123.3415,390.8461,-293.0175,483.4759,240.4338,271.6879,483.4801,391.2687,238.3995,-246.607,-411.7722,-257.9864,238.0949,494.3455,-489.0838,-26.7283,317.1161,-264.0242,-16.6819,-141.4839,429.101,252.2336,-325.1541,471.044,452.352,7.4546,343.3004,-336.4424,489.6317,307.1831,-139.2075,153.572,-332.5617,-361.892,110.6459,-384.8117,-423.0834,-277.9929,44.5303,167.9458,364.1204,-222.5008,-148.7923,198.4694,-74.0043,-458.4327,-227.5346,272.4441,-477.2587,303.1998,72.3129,112.9422,-98.2577,296.903,-489.0569,-461.4503,-381.6239,-440.6212,-354.1834,356.1583,-220.6533,192.5295,-409.0818,-264.2973,498.2192,-306.675,-313.6103,-124.9266,-436.5922,297.9051,121.9351,425.3888,-283.9925,-360.441,-347.4517,8.6814,477.4163,-344.6926,-311.574,-199.9541,-272.862,-360.8642,-306.0856,-218.9529,200.1938,-187.9337,-149.341,-431.5156,-135.3958,131.1299,262.0532,-210.162,353.4392,-249.2969,216.4223,499.6139,215.8176,-346.1569,177.2202,-173.1132,-466.9007,-310.9848,463.485,6.516,-334.8823,-282.7409,-375.2367,-127.4937,257.2427,384.9285,206.4053,-283.9167,369.6312,-325.1146,452.7523,-103.9792,-51.036,153.325,-344.1749,289.4824,109.8308,375.2284,-249.8481,367.8478,71.0143,471.6136
,-265.6336,12.9061,-470.1288,-113.547,38.8925,-205.7232,418.6063,475.6095,-18.8731,-431.5545,-288.6452,-406.8928,79.4828,-152.1474,345.565,-200.8038,174.7789,379.2991,-385.1188,-217.6888,241.9077,-449.1824,467.832,186.0095,-82.8376,-450.7827,-32.2903,-288.132,169.8581,-275.3198,-388.1222,-431.3601,64.9652,368.9351,107.4999,408.8666,267.7858,-462.4349,-198.4615,378.1182,252.7529,-344.883,-364.0161,-124.6144,-222.8902,-103.7114,387.1701,-363.7944,-237.934,230.2082,-63.1276,-456.8188,361.9248,461.0643,160.8127,305.6079,81.2236,-322.0002,-273.4727,-356.9758,227.4751,278.5386,-10.8627,49.6988,-495.2527,428.0901,393.6169,-360.5547,-137.0244,26.962,-326.3379,-399.4972,449.7645,-238.7444,-69.8461,222.6126,-68.7657,132.7567,255.7355,-190.3762,271.6129,405.5764,115.8834,0.9645,331.1665,396.4585,217.4435,-323.6914,39.5915,282.4489,411.3888,-219.2131,240.8913,-109.5264,-438.3067,-157.3961,-180.7485,-258.9153,61.7008,483.4718,-386.0406,-499.1824,-90.2675,-358.5152,-79.3051,-97.4094,-91.7246,63.539,-307.0526,226.416,-454.475,-375.7449,300.532,409.7526,7.7042,-320.297,-244.9896,-282.6645,-414.9866,-331.4623,316.162,348.8361,-342.8609,477.2374,6.5636,-483.931,341.3556,498.2318,-46.3428,203.981,101.2793,128.4547,-285.068,56.5149,-407.6478,-151.4672,116.6673,-115.0498,-491.7974,-151.9475,474.7827,-288.4179,286.4447,-430.6331,-279.1458,318.721,-276.8375,157.9586,-9.2346,398.8374,380.2256,61.1557,13.0746,-80.139,-134.8798,-37.6466,-209.7381,236.1511,388.5629,-196.1123,-481.5887,327.8334,408.2074,479.1439,85.082,227.7623,250.2644,-47.8238,464.8471,-431.5099,489.9794,452.9999,-50.8695,-429.0862,-138.8555,-395.3346,391.3405,-249.4682,-280.6761,-460.5297,1.0129,199.1008,-97.4134,-235.0172,-466.1287,-302.7993,298.4108,-22.478,173.9936,122.8033,-235.0353,231.5057,-97.2265,-203.8224,457.6806,484.1385,-309.3619,-168.3588,-177.2797,-3.9408,-279.2997,104.4862,-139.4921,-450.2539,402.541,-437.1151,-337.4914,-200.3446,-164.484,-293.7216,471.7414,192.6153,233.1926,-122.8377,356.5476,450.1361,-400.0
941,61.0466,441.7145,189.7192,-69.6348,252.5418,-246.5242,-344.0219,14.2904,87.2185,-119.2684,205.422,-374.4802,33.4042,81.2271,-2.5025,-138.6816,8.1989,-439.7698,-446.1887,-374.9012,160.9795,49.3705,72.7925,245.9454,-138.7558,11.9923,414.9421,5.9535,-142.9589,396.2571,-222.2068,-2.6172,-90.5871,346.7415,-337.3213,-372.4473,91.8271,310.6442,263.7468,-357.0433,-246.0827,25.4967,55.8069,-64.7183,-342.7375,-356.7083,70.0885,-79.026,-346.3906,206.2687,-440.6602,321.8775,223.3025,159.6939,292.4308,241.077,-219.0901,495.9946,0.3506,-166.4262,475.1836,-272.5527,118.8711,458.2456,353.3839,-82.5653,37.2834,-92.4387,146.5082,233.4743,-408.0537,-469.9263,148.8959,-324.352,498.608,-324.5319,-114.6779,-200.4192,404.8448,-289.7989,400.6151,-372.9065,359.7581,141.4237,-304.6837,314.3738,-302.4693,442.6138,-224.0818,270.1887,-477.1098,429.0239,264.1871,26.84,283.4518,129.5215,6.6673,-91.4464,75.821,261.5692,-403.0782,-213.9284,-356.8221,-232.4484,33.5696,99.1931,344.0097,187.4695,-264.0572,-199.6103,342.5485,187.058,31.5948,-275.4046,215.9846,425.1114,327.1992,437.8426,-281.2049,71.7953,393.346,-339.9023,-78.8502,314.1866,-120.7207,-416.0802,-327.1001,413.6143,-236.2051,247.1197,318.5011,-194.295,486.3421,409.0831,252.6212,-452.654,-215.7497,-464.1643,61.9033,66.4139,-425.8918,-401.3522,-395.1639,427.7052,-264.1728,131.9144,258.4416,-442.2357,68.3167,441.5518,138.4774,470.7538,-14.6434,-436.2225,385.0708,286.1155,323.9014,137.4596,-352.5503,1.9307,-314.7656,449.5639,-468.3008,81.2499,487.4562,270.1387,-445.3627,460.1174,-205.2539,-32.6044,359.0438,-115.5841,-268.6624,-495.8554,-474.4781,337.9834,-281.4488,252.1636,-33.645,-26.6636,193.8834,287.2377,6.9748,414.4343,-211.7143,-23.0035,-226.5275,-400.285,-336.3935,28.1908,244.27,21.9938,-222.3759,-103.1418,464.7943,-256.0156,46.7511,-487.2509,-321.3631,479.2142,328.166,-481.2039,253.4962,100.2875,-399.98,-81.5868,289.7597,-318.7266,-264.2078,129.4063,407.6828,222.8346,370.0391,46.9838,-356.4992,-305.9992,-258.4048,-410.7736,-245.9092,
32.9185,-237.9085,-403.8853,12.0239,-164.6252,107.369,8.0379,-139.3796,365.9266,-448.5863,314.1141,-280.0686,-463.4747,2.6092,-376.8811,96.7462,242.419,-480.9968,345.3697,328.281,39.0387,-342.3026,469.0461,-103.9411,381.0458,-141.6771,-4.7988,289.4799,-55.0671,-292.4788,364.1267,-395.9876,-232.5859,-285.7012,-444.7762,79.5454,251.5539,359.3705,467.2154,273.1778,-373.8216,299.611,-464.32,-106.0638,491.2626,-39.3721,-110.1154,383.4063,45.0848,262.2361,-111.754,249.0826,-305.9751,22.9663,-120.4794,484.0797,151.9063,388.5088,105.9067,444.0361,-45.5696,243.9313,303.4003,-27.795,-7.2151,411.6561,-100.6193,-207.3277,-6.4576,-300.3722,118.2638,342.3654,66.7861,104.0615,180.5752,281.6788,-342.7549,-65.8778,140.9091,-169.8935,-437.2435,-392.4147,-348.2217,202.3684,440.4071,-276.2247,129.5096,-43.4059,-456.876,-445.1126,-193.8847,-156.3408,274.7116,-129.6168,-484.7027,214.0806,375.6649,444.5303,-71.8577,-474.5957,-342.2716,-322.7281,205.6087,-14.3469,-283.0586,-86.2198,-420.3924,182.3599,22.7485,452.8141,-286.5839,155.1115,-316.4854,-28.3824,56.4873,-146.001,378.2396,473.2566,380.2417,-399.6208,-347.9016,206.5985,-145.9688,-219.9708,-216.6865,404.4334,324.8516,55.3154,-119.4645,-79.2847,-191.5158,-136.3728,413.3355,356.7344,-437.7335,404.9099,-494.6143,135.9107,151.2158,-161.0672,451.0975,-93.0876,495.7659,321.2577,-451.6211,-311.9214,-432.4626,496.8637,382.6126,97.7431,245.2208,-462.5156,-274.939,116.6882,80.6219,315.5602,-342.4345,274.387,-418.7591,53.5711,-96.2339,271.8546,-46.8098,150.3864,206.6682,311.9593,174.7625,-198.5948,105.6143,212.7571,237.4211,-21.2842,-383.0439,285.4973,-80.4955,105.5129,-158.8626,-156.2353,98.5192,-308.2654,-92.7883,45.686,-380.6921,140.1508,365.9526,108.1565,-140.4508,-246.5095,133.3693,-4.6582,-20.843,339.374,-99.2908,17.8824,242.8291,75.8953,-441.8762,-352.3943,-484.0549,-401.3674,321.6953,213.7102,261.1824,-41.5899,65.2736,-26.9977,152.9615,308.5357,-211.4979,477.2073,-414.7828,-330.2034,-123.7898,-261.1105,-328.6632,-15.1514,438.4531,-323.3
771,-173.6672,-293.5578,459.1075,-18.34,-270.1311,-315.6445,348.4226,-435.2806,-419.9553,-106.1863,-283.0003,43.5508,-18.0891,224.808,406.4155,-163.6988,-129.2904,207.8322,474.5666,-60.1079,9.563,44.705,118.7999,-301.6795,-38.2161,410.4003,-190.4926,-430.6086,1.2693,312.7535,-455.5725,-271.7346,-159.4378,-227.9918,312.9331,166.2825,-31.7905,-227.9038,-421.644,296.5264,-335.4129,413.344,48.8782,217.3682,434.8719,-387.0484,170.5191,201.0157,127.1522,474.5561,-100.6847,-434.2549,29.5853,-467.6037,184.2936,116.9028,124.6507,-497.3002,-86.4991,59.6243,-104.9888,-294.6228,223.8354,-97.9298,64.2283,203.7397,186.3586,64.5045,122.1795,439.3753,464.9225,434.9882,85.5836,259.4985,70.5414,-117.1196,198.2037,-127.745,-200.2022,-386.0653,1.6688,272.3237,211.4442,445.0575,479.2069,-354.0842,-211.1788,160.3409,258.6131,-71.1154,-196.203,-95.1323,-398.3867,70.6868,15.5394,333.5079,187.8193,-393.7479,269.1152,-336.0885,339.4546,-147.6351,186.847,-126.4872,-108.1731,-70.3962,-389.0454,135.3408,-51.5671,4.6139,-3.1587,-274.941,-208.586,171.0845,-277.1015,-104.1653,-260.934,-310.5456,290.0738,-38.1867,-254.3353,31.6405,433.6526,86.9343,48.5563,137.4622,-34.6388,-1.5028,-452.3147,349.1007,-347.9019,70.4255,-201.5194,-430.2517,177.8199,-391.6226,20.1876,-287.8148,-190.1158,-356.0897,-319.7011,87.2696,-141.1962,-137.9268,-70.4841,95.4435,16.2261,191.5316,-214.8942,142.0224,209.0575,180.5105,26.1511,-497.0902,-186.2708,441.5505,-7.6379,23.9577,-401.2169,-339.3474,16.9572,269.8157,178.6692,299.5455,-367.3993,-413.7073,-96.9188,-472.0939,-327.975,129.6294,446.5669,-32.714,-120.6079,71.7334,190.4871,436.6714,110.0289,-108.4299,8.0033,-341.055,77.7304,-196.1335,-343.1391,-152.6897,-378.0097,-106.9584,395.4607,-98.6717,-131.0531,-140.8907,-185.3101,-68.8474,-478.2088,-18.3317,256.0313,-119.4212,334.7436,318.1335,-20.8287,-147.7622,118.1926,-218.2094,-478.7367,217.0914,219.1878,75.2151,231.5097,-410.8572,-46.2061,153.4654,264.0178,144.8928,-115.1857,-369.8591,126.6643,-122.1998,480.7727,-85.4362,
134.3245,-34.403,124.6945,12.1795,-184.8116,390.6826,87.9712,367.0822,-233.2724,-245.9838,104.6339,-53.7753,-264.3381,50.9031,-122.0604,136.6276,465.3429,288.8934,5.7445,-325.7759,53.493,-441.8264,-271.3847,-371.3886,-272.7637,-102.4757,-358.4499,-143.2793,-64.6363,499.8284,-155.8017,-37.8801,63.5318,-377.6101,125.3457,57.231,49.3608,-245.5766,-47.9802,383.4127,-114.1047,-30.258,-479.6988,-194.4846,368.4079,466.1545,-26.7084,8.2433,74.9479,-155.4871,494.9634,-196.3082,-206.8022,423.2288,-494.5835,-291.7666,-204.8478,396.6,-418.9048,-130.0584,-137.5258,-440.7922,73.1423,-251.5694,356.1615,-34.088,-23.3318,43.2522,-297.3896,409.686,-305.5675,424.8321,-154.9096,181.7696,-87.5939,-151.7475,-319.3074,227.2369,-113.0086,-68.1299,368.0398,-20.3706,-296.0095,-269.9336,-250.5127,-56.5895,188.9818,82.7481,488.6398,-151.2088,11.8563,320.4209,316.3155,317.2716,-185.4569,128.2219,108.4381,-453.2648,-406.1359,-414.2863,36.6919,-160.1338,188.7767,364.4688,-13.3882,233.621,11.2764,-154.8894,424.1841,-128.4954,23.1408,183.1928,382.2918,-464.2506,234.1366,-447.21,-425.1161,66.1712,424.058,299.3596,372.7703,-162.3764,-37.8575,-468.5142,189.9036,172.0345,310.1368,-459.7659,-219.5317,-68.9306,211.4315,-408.8232,215.1716,-134.0617,367.326,385.2393,453.6431,-258.6041,194.9712,-266.8576,145.4018,-406.4884,119.3747,466.6835,-404.694,-480.8574,-3.1007,-48.0469,-70.915,-229.4956,-69.6999,-114.9404,372.8744,-247.5689,250.4333,252.9375,71.5672,323.3984,268.7582,16.7518,-258.5373,252.518,378.1721,-197.3271,-211.1179,444.2923,-152.2646,262.3183,159.3338,259.6788,271.9127,-26.8833,403.0915,-56.9197,-445.8869,-108.8167,417.8988,13.4232,-281.765,-405.8573,262.7831,-279.493,328.5591,-453.3941,-116.0368,435.4734,-439.0927,-332.9565,355.4955,324.9878,33.3519,-165.0182,188.1811,467.3455,185.1057,-233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.
8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1
373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-37
6.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.358
2,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.5665,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.4302,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.419
1,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.7721,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.786
6,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409 ] - do: indices.get_mapping: @@ -189,7 +189,7 @@ setup: match: "*dense_vector*" mapping: type: dense_vector - dims: 3000 + dims: 5000 index: true similarity: cosine diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java index f201685294058..ba1a8b7919963 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESIntegTestCase; @@ -1236,10 +1237,7 @@ private ClusterAllocationExplanation runExplain(boolean primary, String nodeId, .get() .getExplanation(); if (logger.isDebugEnabled()) { - XContentBuilder builder = JsonXContent.contentBuilder(); - builder.prettyPrint(); - builder.humanReadable(true); - logger.debug("--> explain json output: \n{}", Strings.toString(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS))); + logger.debug("--> explain json output: \n{}", Strings.toString(explanation, true, true)); } return explanation; } @@ -1304,7 +1302,7 @@ private DiscoveryNode replicaNode() { private XContentParser getParser(ClusterAllocationExplanation explanation) throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); - return createParser(explanation.toXContent(builder, ToXContent.EMPTY_PARAMS)); + return createParser(ChunkedToXContent.wrapAsToXContent(explanation).toXContent(builder, ToXContent.EMPTY_PARAMS)); } private void verifyShardInfo(XContentParser parser, boolean primary, boolean includeDiskInfo, ShardRoutingState state) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 373213be479a7..b869b3a90fbce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -51,7 +51,6 @@ import org.elasticsearch.snapshots.SnapshotInfoTestUtils; import org.elasticsearch.snapshots.SnapshotsInProgressSerializationTests; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.VersionUtils; import java.util.Collections; @@ -80,8 +79,8 @@ public void testClusterStateDiffSerialization() throws Exception { DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")) .nodes(discoveryNodes) - .putTransportVersion("master", TransportVersionUtils.randomVersion(random())) - .putTransportVersion("other", TransportVersionUtils.randomVersion(random())) + .putCompatibilityVersions("master", CompatibilityVersionsUtils.fakeSystemIndicesRandom()) + .putCompatibilityVersions("other", CompatibilityVersionsUtils.fakeSystemIndicesRandom()) .build(); ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes( ClusterState.Builder.toBytes(clusterState), @@ -250,7 +249,7 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) { versions.put(id, CompatibilityVersionsUtils.fakeSystemIndicesRandom()); } - return ClusterState.builder(clusterState).nodes(nodes).compatibilityVersions(versions); + return ClusterState.builder(clusterState).nodes(nodes).nodeIdsToCompatibilityVersions(versions); } /** diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 1a4c908306648..58b824e4676f2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -37,9 +37,9 @@ import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java index 9660be019383a..448bfdb301aeb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceIT.java @@ -22,9 +22,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java index ba59d74768fa2..de42c95d79476 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java @@ -27,9 +27,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; 
+import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java index 53b568f0a25ef..a4333d51163dc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/HealthServiceIT.java @@ -27,10 +27,10 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java index bab4b2c088826..60a1e7b5bddbc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/FinalPipelineIT.java @@ -38,9 +38,9 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import 
org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java index d1095354e1126..bbb6ae9d16fc5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/SettingsListenerIT.java @@ -22,10 +22,10 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java index 0a090fa889a29..79c6dca764250 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/TestSystemIndexDescriptor.java @@ -37,6 +37,8 @@ public class TestSystemIndexDescriptor extends SystemIndexDescriptor { .put(IndexMetadata.INDEX_AUTO_EXPAND_REPLICAS_SETTING.getKey(), "0-1") .put(IndexMetadata.SETTING_PRIORITY, Integer.MAX_VALUE) .build(); + private static final int NEW_MAPPINGS_VERSION = 1; + private static final int OLD_MAPPINGS_VERSION = 0; TestSystemIndexDescriptor() { super( @@ -90,6 +92,11 @@ public String getMappings() { return useNewMappings.get() ? 
getNewMappings() : getOldMappings(); } + @Override + public MappingsVersion getMappingsVersion() { + return useNewMappings.get() ? new MappingsVersion(NEW_MAPPINGS_VERSION, 0) : new MappingsVersion(OLD_MAPPINGS_VERSION, 0); + } + public static String getOldMappings() { try { final XContentBuilder builder = jsonBuilder(); @@ -97,7 +104,7 @@ public static String getOldMappings() { builder.startObject(); { builder.startObject("_meta"); - builder.field(SystemIndexDescriptor.VERSION_META_KEY, 0); + builder.field(SystemIndexDescriptor.VERSION_META_KEY, OLD_MAPPINGS_VERSION); builder.field("version", Version.CURRENT.previousMajor().toString()); builder.endObject(); @@ -124,7 +131,7 @@ public static String getNewMappings() { builder.startObject(); { builder.startObject("_meta"); - builder.field(SystemIndexDescriptor.VERSION_META_KEY, 1); + builder.field(SystemIndexDescriptor.VERSION_META_KEY, NEW_MAPPINGS_VERSION); builder.field("version", Version.CURRENT.toString()); builder.endObject(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index c09cc7245074d..7ba3b5814eb4b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -24,6 +24,7 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -36,10 +37,15 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import 
org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.coordination.ApplyCommitRequest; +import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -57,6 +63,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.search.Queries; @@ -64,6 +71,10 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; @@ -104,6 +115,7 @@ import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; +import 
org.elasticsearch.transport.TestTransportChannel; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.XContentType; @@ -136,6 +148,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; @@ -1614,6 +1627,126 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { ); } + public void testWaitForClusterStateToBeAppliedOnSourceNode() throws Exception { + internalCluster().startMasterOnlyNode(); + final var primaryNode = internalCluster().startDataOnlyNode(); + String indexName = "test-index"; + createIndex(indexName, indexSettings(1, 0).build()); + ensureGreen(indexName); + final List indexRequests = IntStream.range(0, between(10, 500)) + .mapToObj(n -> client().prepareIndex(indexName).setSource("foo", "bar")) + .toList(); + indexRandom(randomBoolean(), true, true, indexRequests); + assertThat(indicesAdmin().prepareFlush(indexName).get().getFailedShards(), equalTo(0)); + + final var replicaNode = internalCluster().startDataOnlyNode(); + + final long initialClusterStateVersion = clusterService().state().version(); + + // Helper class to encapsulate the sync mechanism that delays applying cluster states on the primary node until the replica gives + // the go-ahead. 
+ class ClusterStateSyncListeners implements Releasable { + private final Map> clusterStateBarriers = ConcurrentCollections.newConcurrentMap(); + private final SubscribableListener startRecoveryListener = new SubscribableListener<>(); + + private final CountDownLatch completeLatch = new CountDownLatch(1); + private final RefCounted refCounted = AbstractRefCounted.of(completeLatch::countDown); + private final List cleanup = new ArrayList<>(2); + + @Override + public void close() { + refCounted.decRef(); + safeAwait(completeLatch); + cleanup.forEach(Runnable::run); + clusterStateBarriers.values().forEach(l -> l.onResponse(null)); + } + + void addCleanup(Runnable runnable) { + cleanup.add(runnable); + } + + SubscribableListener getStateApplyDelayListener(long clusterStateVersion) { + assertThat(clusterStateVersion, greaterThanOrEqualTo(initialClusterStateVersion)); + if (refCounted.tryIncRef()) { + try { + return clusterStateBarriers.computeIfAbsent(clusterStateVersion, ignored -> new SubscribableListener<>()); + } finally { + refCounted.decRef(); + } + } else { + return SubscribableListener.newSucceeded(null); + } + } + + void onStartRecovery() { + Thread.yield(); + assertFalse(startRecoveryListener.isDone()); + startRecoveryListener.onResponse(null); + } + + public void delayUntilRecoveryStart(SubscribableListener listener) { + assertFalse(startRecoveryListener.isDone()); + startRecoveryListener.addListener(listener); + } + } + + try (var clusterStateSyncListeners = new ClusterStateSyncListeners()) { + final var primaryNodeTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + ); + primaryNodeTransportService.addRequestHandlingBehavior( + Coordinator.COMMIT_STATE_ACTION_NAME, + (handler, request, channel, task) -> { + assertThat(request, instanceOf(ApplyCommitRequest.class)); + clusterStateSyncListeners.getStateApplyDelayListener(((ApplyCommitRequest) request).getVersion()) + .addListener( + 
ActionListener.wrap(ignored -> handler.messageReceived(request, channel, task), e -> fail(e, "unexpected")) + ); + } + ); + primaryNodeTransportService.addRequestHandlingBehavior( + PeerRecoverySourceService.Actions.START_RECOVERY, + (handler, request, channel, task) -> { + assertThat(request, instanceOf(StartRecoveryRequest.class)); + assertThat(((StartRecoveryRequest) request).clusterStateVersion(), greaterThan(initialClusterStateVersion)); + handler.messageReceived( + request, + new TestTransportChannel( + new ChannelActionListener<>(channel).delegateResponse( + (l, e) -> fail(e, "recovery should succeed on first attempt") + ) + ), + task + ); + clusterStateSyncListeners.onStartRecovery(); + } + ); + clusterStateSyncListeners.addCleanup(primaryNodeTransportService::clearInboundRules); + + final var replicaClusterService = internalCluster().getInstance(ClusterService.class, replicaNode); + final ClusterStateListener clusterStateListener = event -> { + final var primaryProceedListener = clusterStateSyncListeners.getStateApplyDelayListener(event.state().version()); + final var indexRoutingTable = event.state().routingTable().index(indexName); + assertNotNull(indexRoutingTable); + final var indexShardRoutingTable = indexRoutingTable.shard(0); + if (indexShardRoutingTable.size() == 2 && indexShardRoutingTable.getAllInitializingShards().isEmpty() == false) { + // this is the cluster state update which starts the recovery, so delay the primary node application until recovery + // has started + clusterStateSyncListeners.delayUntilRecoveryStart(primaryProceedListener); + } else { + // this is some other cluster state update, so we must let it proceed now + primaryProceedListener.onResponse(null); + } + }; + replicaClusterService.addListener(clusterStateListener); + clusterStateSyncListeners.addCleanup(() -> replicaClusterService.removeListener(clusterStateListener)); + + updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1), indexName); 
+ ensureGreen(indexName); + } + } + private void assertGlobalCheckpointIsStableAndSyncedInAllNodes(String indexName, List nodes, int shard) throws Exception { assertThat(nodes, is(not(empty()))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java index 959611f9fd855..f5453c8ddf1f1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestAsyncProcessorIT.java @@ -27,9 +27,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 472707babf155..c6e6a207ae6c0 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -279,7 +279,11 @@ exports org.elasticsearch.indices.recovery.plan; exports org.elasticsearch.indices.store; exports org.elasticsearch.ingest; - exports org.elasticsearch.internal to org.elasticsearch.serverless.version, org.elasticsearch.serverless.buildinfo; + exports org.elasticsearch.internal + to + org.elasticsearch.serverless.version, + org.elasticsearch.serverless.buildinfo, + org.elasticsearch.serverless.constants; exports org.elasticsearch.lucene.analysis.miscellaneous; exports org.elasticsearch.lucene.grouping; exports org.elasticsearch.lucene.queries; @@ -363,7 +367,6 @@ exports org.elasticsearch.synonyms; exports 
org.elasticsearch.tasks; exports org.elasticsearch.threadpool; - exports org.elasticsearch.tracing; exports org.elasticsearch.transport; exports org.elasticsearch.upgrades; exports org.elasticsearch.usage; @@ -379,6 +382,7 @@ org.elasticsearch.settings.secure, org.elasticsearch.serverless.constants, org.elasticsearch.serverless.apifiltering; + exports org.elasticsearch.telemetry.tracing; provides java.util.spi.CalendarDataProvider with org.elasticsearch.common.time.IsoCalendarDataProvider; provides org.elasticsearch.xcontent.ErrorOnUnknown with org.elasticsearch.common.xcontent.SuggestingErrorOnUnknown; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 2aae6befb673a..92bb88f16385d 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -115,7 +115,7 @@ private static TransportVersion findCurrent() { if (versionExtension == null) { return TransportVersions.LATEST_DEFINED; } - var version = versionExtension.getCurrentTransportVersion(); + var version = versionExtension.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED); assert version.onOrAfter(TransportVersions.LATEST_DEFINED); return version; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 16a99d38f6623..170f9140bae21 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -141,6 +141,8 @@ static TransportVersion def(int id) { public static final TransportVersion V_8_500_074 = def(8_500_074); public static final TransportVersion NODE_INFO_INDEX_VERSION_ADDED = def(8_500_075); public static final TransportVersion FIRST_NEW_ID_LAYOUT = def(8_501_00_0); + public static final TransportVersion COMMIT_PRIMARY_TERM_GENERATION = def(8_501_00_1); + 
public static final TransportVersion WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED = def(8_502_00_0); /* * STOP! READ THIS FIRST! No, really, * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ @@ -216,7 +218,7 @@ static TransportVersion def(int id) { IDS = null; } - static NavigableMap getAllVersionIds(Class cls) { + public static NavigableMap getAllVersionIds(Class cls) { Map versionIdFields = new HashMap<>(); NavigableMap builder = new TreeMap<>(); diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index f0b755f1108cd..7395d6003ec44 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -457,8 +457,8 @@ import org.elasticsearch.rest.action.synonyms.RestPutSynonymRuleAction; import org.elasticsearch.rest.action.synonyms.RestPutSynonymsAction; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.usage.UsageService; import java.time.Instant; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java index 89f40d407bb49..39baf25f5dada 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainResponse.java @@ -11,13 +11,16 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import 
org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; /** * Explanation response for a shard in the cluster */ -public class ClusterAllocationExplainResponse extends ActionResponse { +public class ClusterAllocationExplainResponse extends ActionResponse implements ChunkedToXContentObject { private ClusterAllocationExplanation cae; @@ -41,4 +44,9 @@ public ClusterAllocationExplanation getExplanation() { public void writeTo(StreamOutput out) throws IOException { cae.writeTo(out); } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + return cae.toXContentChunked(params); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 34c1bb4a0c85f..d22bae9c5a4b1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -16,27 +16,32 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationDecision; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import 
org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.time.Instant; +import java.util.Collections; +import java.util.Iterator; import java.util.Locale; import static org.elasticsearch.cluster.routing.allocation.AbstractAllocationDecision.discoveryNodeToXContent; +import static org.elasticsearch.common.xcontent.ChunkedToXContentHelper.singleChunk; /** * A {@code ClusterAllocationExplanation} is an explanation of why a shard is unassigned, * or if it is not unassigned, then which nodes it could possibly be relocated to. * It is an immutable class. */ -public final class ClusterAllocationExplanation implements ToXContentObject, Writeable { +public final class ClusterAllocationExplanation implements ChunkedToXContentObject, Writeable { static final String NO_SHARD_SPECIFIED_MESSAGE = "No shard was specified in the explain API request, so this response " + "explains a randomly chosen unassigned shard. There may be other unassigned shards in this cluster which cannot be assigned for " @@ -156,9 +161,10 @@ public ShardAllocationDecision getShardAllocationDecision() { return shardAllocationDecision; } - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - { + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat(singleChunk((builder, p) -> { + builder.startObject(); + if (isSpecificShard() == false) { builder.field("note", NO_SHARD_SPECIFIED_MESSAGE); } @@ -169,48 +175,52 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (shardRouting.unassignedInfo() != null) { unassignedInfoToXContent(shardRouting.unassignedInfo(), builder); } - if (currentNode != null) { + + if (this.currentNode != null) { builder.startObject("current_node"); - { - discoveryNodeToXContent(currentNode, true, builder); - if (shardAllocationDecision.getMoveDecision().isDecisionTaken() - && 
shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) { - builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking()); - } + discoveryNodeToXContent(this.currentNode, true, builder); + if (shardAllocationDecision.getMoveDecision().isDecisionTaken() + && shardAllocationDecision.getMoveDecision().getCurrentNodeRanking() > 0) { + builder.field("weight_ranking", shardAllocationDecision.getMoveDecision().getCurrentNodeRanking()); } builder.endObject(); } - if (this.clusterInfo != null) { - builder.startObject("cluster_info"); - { - // This field might be huge, TODO add chunking support here - ChunkedToXContent.wrapAsToXContent(clusterInfo).toXContent(builder, params); - } - builder.endObject(); // end "cluster_info" - } - if (shardAllocationDecision.isDecisionTaken()) { - shardAllocationDecision.toXContent(builder, params); + + return builder; + }), + this.clusterInfo != null + ? Iterators.concat( + ChunkedToXContentHelper.startObject("cluster_info"), + this.clusterInfo.toXContentChunked(params), + ChunkedToXContentHelper.endObject() + ) + : Collections.emptyIterator(), + getShardAllocationDecisionChunked(params), + Iterators.single((builder, p) -> builder.endObject()) + ); + } + + private Iterator getShardAllocationDecisionChunked(ToXContent.Params params) { + if (shardAllocationDecision.isDecisionTaken()) { + return shardAllocationDecision.toXContentChunked(params); + } else { + String explanation; + if (shardRouting.state() == ShardRoutingState.RELOCATING) { + explanation = "the shard is in the process of relocating from node [" + + currentNode.getName() + + "] " + + "to node [" + + relocationTargetNode.getName() + + "], wait until relocation has completed"; } else { - String explanation; - if (shardRouting.state() == ShardRoutingState.RELOCATING) { - explanation = "the shard is in the process of relocating from node [" - + currentNode.getName() - + "] " - + "to node [" - + relocationTargetNode.getName() - + "], 
wait until relocation has completed"; - } else { - assert shardRouting.state() == ShardRoutingState.INITIALIZING; - explanation = "the shard is in the process of initializing on node [" - + currentNode.getName() - + "], " - + "wait until initialization has completed"; - } - builder.field("explanation", explanation); + assert shardRouting.state() == ShardRoutingState.INITIALIZING; + explanation = "the shard is in the process of initializing on node [" + + currentNode.getName() + + "], " + + "wait until initialization has completed"; } + return Iterators.single((builder, p) -> builder.field("explanation", explanation)); } - builder.endObject(); // end wrapping object - return builder; } private static XContentBuilder unassignedInfoToXContent(UnassignedInfo unassignedInfo, XContentBuilder builder) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index c33bc841190a0..c2684c4becf3c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -151,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - builder.compatibilityVersions(getCompatibilityVersions(currentState)); + builder.nodeIdsToCompatibilityVersions(getCompatibilityVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java index 913c16fa33c46..c62b689d58e78 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/AutoCreateAction.java @@ -283,10 +283,10 @@ ClusterState execute( if (isManagedSystemIndex) { final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( - currentState.nodes().getSmallestNonClientNodeVersion() + currentState.getMinSystemIndexMappingVersions().get(mainDescriptor.getPrimaryIndex()) ); if (descriptor == null) { - final String message = mainDescriptor.getMinimumNodeVersionMessage("auto-create index"); + final String message = mainDescriptor.getMinimumMappingsVersionMessage("auto-create index"); logger.warn(message); throw new IllegalStateException(message); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 3e72500cc016b..3deb70df92d88 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -132,10 +132,10 @@ protected void masterOperation( // the index to the latest settings. 
if (isManagedSystemIndex && Strings.isNullOrEmpty(request.origin())) { final SystemIndexDescriptor descriptor = mainDescriptor.getDescriptorCompatibleWith( - state.nodes().getSmallestNonClientNodeVersion() + state.getMinSystemIndexMappingVersions().get(mainDescriptor.getPrimaryIndex()) ); if (descriptor == null) { - final String message = mainDescriptor.getMinimumNodeVersionMessage("create index"); + final String message = mainDescriptor.getMinimumMappingsVersionMessage("create index"); logger.warn(message); listener.onFailure(new IllegalStateException(message)); return; diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java index 75e5fcfa3fa0f..6a32959b7e5f7 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java @@ -8,6 +8,8 @@ package org.elasticsearch.bootstrap; +import org.elasticsearch.common.ReferenceDocs; + import java.util.Objects; /** @@ -59,4 +61,6 @@ default boolean alwaysEnforce() { return false; } + ReferenceDocs referenceDocs(); + } diff --git a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java index b9610c689f92e..a99ed225b244b 100644 --- a/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/elasticsearch/bootstrap/BootstrapChecks.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -131,10 +132,11 @@ static void check(final BootstrapContext context, final boolean 
enforceLimits, f for (final BootstrapCheck check : checks) { final BootstrapCheck.BootstrapCheckResult result = check.check(context); if (result.isFailure()) { + final String message = result.getMessage() + "; for more information see [" + check.referenceDocs() + "]"; if (enforceLimits == false && enforceBootstrapChecks == false && check.alwaysEnforce() == false) { - ignoredErrors.add(result.getMessage()); + ignoredErrors.add(message); } else { - errors.add(result.getMessage()); + errors.add(message); } } } @@ -150,7 +152,9 @@ static void check(final BootstrapContext context, final boolean enforceLimits, f + errors.size() + "] bootstrap checks failed. You must address the points described in the following [" + errors.size() - + "] lines before starting Elasticsearch." + + "] lines before starting Elasticsearch. For more information see [" + + ReferenceDocs.BOOTSTRAP_CHECKS + + "]" ); for (int i = 0; i < errors.size(); i++) { messages.add("bootstrap check failure [" + (i + 1) + "] of [" + errors.size() + "]: " + errors.get(i)); @@ -240,6 +244,11 @@ public BootstrapCheckResult check(BootstrapContext context) { } } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_HEAP_SIZE; + } + // visible for testing long getInitialHeapSize() { return JvmInfo.jvmInfo().getConfiguredInitialHeapSize(); @@ -298,6 +307,11 @@ public final BootstrapCheckResult check(BootstrapContext context) { } } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_FILE_DESCRIPTOR; + } + // visible for testing long getMaxFileDescriptorCount() { return ProcessProbe.getMaxFileDescriptorCount(); @@ -321,6 +335,11 @@ boolean isMemoryLocked() { return Natives.isMemoryLocked(); } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_MEMORY_LOCK; + } + } static class MaxNumberOfThreadsCheck implements BootstrapCheck { @@ -349,6 +368,10 @@ long getMaxNumberOfThreads() { return 
JNANatives.MAX_NUMBER_OF_THREADS; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_MAX_NUMBER_THREADS; + } } static class MaxSizeVirtualMemoryCheck implements BootstrapCheck { @@ -378,6 +401,10 @@ long getMaxSizeVirtualMemory() { return JNANatives.MAX_SIZE_VIRTUAL_MEMORY; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_MAX_SIZE_VIRTUAL_MEMORY; + } } /** @@ -409,6 +436,10 @@ long getMaxFileSize() { return JNANatives.MAX_FILE_SIZE; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_MAX_FILE_SIZE; + } } static class MaxMapCountCheck implements BootstrapCheck { @@ -478,6 +509,10 @@ static long parseProcSysVmMaxMapCount(final String procSysVmMaxMapCount) throws return Long.parseLong(procSysVmMaxMapCount); } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_MAXIMUM_MAP_COUNT; + } } static class ClientJvmCheck implements BootstrapCheck { @@ -501,6 +536,10 @@ String getVmName() { return JvmInfo.jvmInfo().getVmName(); } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_CLIENT_JVM; + } } /** @@ -529,6 +568,10 @@ String getUseSerialGC() { return JvmInfo.jvmInfo().useSerialGC(); } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_USE_SERIAL_COLLECTOR; + } } /** @@ -551,6 +594,10 @@ boolean isSystemCallFilterInstalled() { return Natives.isSystemCallFilterInstalled(); } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_SYSTEM_CALL_FILTER; + } } abstract static class MightForkCheck implements BootstrapCheck { @@ -579,6 +626,11 @@ public final boolean alwaysEnforce() { return true; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_ONERROR_AND_ONOUTOFMEMORYERROR; + } + } static class OnErrorCheck extends MightForkCheck { @@ -658,6 +710,11 @@ 
String javaVersion() { return Constants.JAVA_VERSION; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_EARLY_ACCESS; + } + } static class AllPermissionCheck implements BootstrapCheck { @@ -681,6 +738,10 @@ boolean isAllPermissionGranted() { return true; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_ALL_PERMISSION; + } } static class DiscoveryConfiguredCheck implements BootstrapCheck { @@ -703,6 +764,11 @@ public BootstrapCheckResult check(BootstrapContext context) { ) ); } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_DISCOVERY_CONFIGURATION; + } } static class ByteOrderCheck implements BootstrapCheck { @@ -718,5 +784,10 @@ public BootstrapCheckResult check(BootstrapContext context) { ByteOrder nativeByteOrder() { return ByteOrder.nativeOrder(); } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 4444ab72ca15e..ee94008372dab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -416,10 +416,6 @@ public AllocationService getAllocationService() { return allocationService; } - public MetadataDeleteIndexService getMetadataDeleteIndexService() { - return metadataDeleteIndexService; - } - @Override protected void configure() { bind(GatewayAllocator.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 603a93ab11f79..aa1fba7aecc81 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -284,6 +284,12 @@ public Map 
compatibilityVersions() { return this.compatibilityVersions; } + public boolean hasMixedSystemIndexVersions() { + return compatibilityVersions.values() + .stream() + .anyMatch(e -> e.systemIndexMappingsVersion().equals(minVersions.systemIndexMappingsVersion()) == false); + } + public TransportVersion getMinTransportVersion() { return this.minVersions.transportVersion(); } @@ -777,13 +783,35 @@ public DiscoveryNodes nodes() { return nodes; } + // Deprecate to keep downstream projects compiling + @Deprecated(forRemoval = true) public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) { - // TODO[wrb]: system index mappings versions will be added in a followup - compatibilityVersions.put(nodeId, new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId), Map.of())); + return putCompatibilityVersions(nodeId, transportVersion, Map.of()); + } + + public Builder putCompatibilityVersions( + String nodeId, + TransportVersion transportVersion, + Map systemIndexMappingsVersions + ) { + return putCompatibilityVersions( + nodeId, + new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId), systemIndexMappingsVersions) + ); + } + + public Builder putCompatibilityVersions(String nodeId, CompatibilityVersions versions) { + compatibilityVersions.put(nodeId, versions); return this; } + // Deprecate to keep downstream projects compiling + @Deprecated(forRemoval = true) public Builder compatibilityVersions(Map versions) { + return nodeIdsToCompatibilityVersions(versions); + } + + public Builder nodeIdsToCompatibilityVersions(Map versions) { versions.forEach((key, value) -> Objects.requireNonNull(value, key)); // remove all versions not present in the new map this.compatibilityVersions.keySet().retainAll(versions.keySet()); @@ -923,11 +951,15 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) thr builder.routingTable = RoutingTable.readFrom(in); builder.nodes = DiscoveryNodes.readFrom(in, 
localNode); if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { - builder.compatibilityVersions(in.readMap(CompatibilityVersions::readVersion)); + builder.nodeIdsToCompatibilityVersions(in.readMap(CompatibilityVersions::readVersion)); } else { // this clusterstate is from a pre-8.8.0 node // infer the versions from discoverynodes for now - builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n))); + // leave mappings versions empty + builder.nodes() + .getNodes() + .values() + .forEach(n -> builder.putCompatibilityVersions(n.getId(), inferTransportVersion(n), Map.of())); } builder.blocks = ClusterBlocks.readFrom(in); int customSize = in.readVInt(); @@ -1076,10 +1108,14 @@ public ClusterState apply(ClusterState state) { builder.routingTable(routingTable.apply(state.routingTable)); builder.nodes(nodes.apply(state.nodes)); if (versions != null) { - builder.compatibilityVersions(this.versions.apply(state.compatibilityVersions)); + builder.nodeIdsToCompatibilityVersions(this.versions.apply(state.compatibilityVersions)); } else { // infer the versions from discoverynodes for now - builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n))); + // leave mappings versions empty + builder.nodes() + .getNodes() + .values() + .forEach(n -> builder.putCompatibilityVersions(n.getId(), inferTransportVersion(n), Map.of())); } builder.metadata(metadata.apply(state.metadata)); builder.blocks(blocks.apply(state.blocks)); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 7ccea8e99918b..619a7e09ee651 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; 
import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ListenableActionFuture; @@ -175,6 +174,7 @@ public class Coordinator extends AbstractLifecycleComponent implements ClusterSt private final LagDetector lagDetector; private final ClusterFormationFailureHelper clusterFormationFailureHelper; private final JoinReasonService joinReasonService; + private final CompatibilityVersions compatibilityVersions; private Mode mode; private Optional lastKnownLeader; @@ -317,6 +317,7 @@ public Coordinator( this.peerFinderListeners = new CopyOnWriteArrayList<>(); this.peerFinderListeners.add(clusterBootstrapService); this.leaderHeartbeatService = leaderHeartbeatService; + this.compatibilityVersions = compatibilityVersions; } /** @@ -1064,7 +1065,7 @@ protected void doStart() { .addGlobalBlock(noMasterBlockService.getNoMasterBlock()) ) .nodes(DiscoveryNodes.builder().add(getLocalNode()).localNodeId(getLocalNode().getId())) - .putTransportVersion(getLocalNode().getId(), TransportVersion.current()) + .putCompatibilityVersions(getLocalNode().getId(), compatibilityVersions) .metadata(metadata) .build(); applierState = initialState; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index dd52f20c7355a..170648452d141 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -222,7 +222,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( - 
newState.nodes(nodesBuilder).compatibilityVersions(compatibilityVersionsMap).build() + newState.nodes(nodesBuilder).nodeIdsToCompatibilityVersions(compatibilityVersionsMap).build() ); final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); assert enforceVersionBarrier == false @@ -295,7 +295,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) - .compatibilityVersions(compatibilityVersions) + .nodeIdsToCompatibilityVersions(compatibilityVersions) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .metadata( Metadata.builder(currentState.metadata()) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 68c611aeef9a6..39230d0255ae7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -107,7 +107,10 @@ protected ClusterState remainingNodesClusterState( DiscoveryNodes.Builder remainingNodesBuilder, Map compatibilityVersions ) { - return ClusterState.builder(currentState).nodes(remainingNodesBuilder).compatibilityVersions(compatibilityVersions).build(); + return ClusterState.builder(currentState) + .nodes(remainingNodesBuilder) + .nodeIdsToCompatibilityVersions(compatibilityVersions) + .build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 3fb6eafb5c606..8423a5ad37334 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -155,15 +155,6 @@ static ClusterState updateDataLifecycle( Metadata.Builder builder = Metadata.builder(metadata); for (var dataStreamName : dataStreamNames) { var dataStream = validateDataStream(metadata, dataStreamName); - if (dataStream.isSystem()) { - if (lifecycle != null && lifecycle.getDownsamplingRounds() != null) { - throw new IllegalArgumentException( - "System data streams do not support downsampling as part of their lifecycle configuration. Encountered [" - + dataStream.getName() - + "] in the request" - ); - } - } builder.put( new DataStream( dataStream.getName(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java index 9a932200f78e1..516be12d56a6b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexService.java @@ -62,7 +62,7 @@ public Tuple executeTask( DeleteIndexClusterStateUpdateRequest task, ClusterState clusterState ) { - return Tuple.tuple(deleteIndices(clusterState, Sets.newHashSet(task.indices())), task); + return Tuple.tuple(MetadataDeleteIndexService.deleteIndices(clusterState, Sets.newHashSet(task.indices()), settings), task); } @Override @@ -90,7 +90,7 @@ public void deleteIndices(final DeleteIndexClusterStateUpdateRequest request) { /** * Delete some indices from the cluster state. 
*/ - public ClusterState deleteIndices(ClusterState currentState, Set indices) { + public static ClusterState deleteIndices(ClusterState currentState, Set indices, Settings settings) { final Metadata meta = currentState.metadata(); final Set indicesToDelete = new HashSet<>(); final Map backingIndices = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java index 9a1a05bc4ca8c..fb5acbdd2ac49 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesService.java @@ -54,22 +54,20 @@ public class MetadataIndexAliasesService { private final IndicesService indicesService; - private final MetadataDeleteIndexService deleteIndexService; - private final NamedXContentRegistry xContentRegistry; private final ClusterStateTaskExecutor executor; private final MasterServiceTaskQueue taskQueue; + private final ClusterService clusterService; @Inject public MetadataIndexAliasesService( ClusterService clusterService, IndicesService indicesService, - MetadataDeleteIndexService deleteIndexService, NamedXContentRegistry xContentRegistry ) { + this.clusterService = clusterService; this.indicesService = indicesService; - this.deleteIndexService = deleteIndexService; this.xContentRegistry = xContentRegistry; this.executor = new SimpleBatchedAckListenerTaskExecutor<>() { @@ -110,7 +108,7 @@ public ClusterState applyAliasActions(ClusterState currentState, Iterable sortNodeDecisions(List nodeDecisions, XContentBuilder builder, Params params) - throws IOException { - - if (nodeDecisions != null && nodeDecisions.isEmpty() == false) { - builder.startArray("node_allocation_decisions"); - { - for (NodeAllocationResult explanation : nodeDecisions) { - explanation.toXContent(builder, params); - } - } - builder.endArray(); + 
public static Iterator nodeDecisionsToXContentChunked(List nodeDecisions) { + if (nodeDecisions == null || nodeDecisions.isEmpty()) { + return Collections.emptyIterator(); } - return builder; + + return Iterators.concat( + ChunkedToXContentHelper.startArray("node_allocation_decisions"), + nodeDecisions.iterator(), + ChunkedToXContentHelper.endArray() + ); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java index 1d7e0e4a1edbb..d7bcacd3a0cde 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocateUnassignedDecision.java @@ -12,15 +12,17 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; import java.util.Collections; import java.util.EnumMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -291,28 +293,33 @@ && getNodeDecisions().stream() } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public Iterator toXContentChunked(ToXContent.Params params) { checkDecisionState(); - builder.field("can_allocate", getAllocationDecision()); - builder.field("allocate_explanation", getExplanation()); - if (targetNode != null) { - 
builder.startObject("target_node"); - discoveryNodeToXContent(targetNode, true, builder); - builder.endObject(); - } - if (allocationId != null) { - builder.field("allocation_id", allocationId); - } - if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) { - builder.humanReadableField( - "configured_delay_in_millis", - "configured_delay", - TimeValue.timeValueMillis(configuredDelayInMillis) - ); - builder.humanReadableField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayInMillis)); - } - nodeDecisionsToXContent(nodeDecisions, builder, params); - return builder; + return Iterators.concat(Iterators.single((builder, p) -> { + builder.field("can_allocate", getAllocationDecision()); + builder.field("allocate_explanation", getExplanation()); + if (targetNode != null) { + builder.startObject("target_node"); + discoveryNodeToXContent(targetNode, true, builder); + builder.endObject(); + } + if (allocationId != null) { + builder.field("allocation_id", allocationId); + } + if (allocationStatus == AllocationStatus.DELAYED_ALLOCATION) { + builder.humanReadableField( + "configured_delay_in_millis", + "configured_delay", + TimeValue.timeValueMillis(configuredDelayInMillis) + ); + builder.humanReadableField( + "remaining_delay_in_millis", + "remaining_delay", + TimeValue.timeValueMillis(remainingDelayInMillis) + ); + } + return builder; + }), nodeDecisionsToXContentChunked(nodeDecisions)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java index 69eacb70f057b..3819805316f26 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/MoveDecision.java @@ -11,12 +11,14 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Iterator; import java.util.List; import java.util.Objects; @@ -282,37 +284,38 @@ public String getExplanation() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + public Iterator toXContentChunked(ToXContent.Params params) { checkDecisionState(); - if (targetNode != null) { - builder.startObject("target_node"); - discoveryNodeToXContent(targetNode, true, builder); - builder.endObject(); - } - builder.field("can_remain_on_current_node", canRemain() ? "yes" : "no"); - if (canRemain() == false && canRemainDecision.getDecisions().isEmpty() == false) { - builder.startArray("can_remain_decisions"); - canRemainDecision.toXContent(builder, params); - builder.endArray(); - } - if (clusterRebalanceDecision != null) { - AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type()); - builder.field("can_rebalance_cluster", rebalanceDecision); - if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) { - builder.startArray("can_rebalance_cluster_decisions"); - clusterRebalanceDecision.toXContent(builder, params); + return Iterators.concat(Iterators.single((builder, p) -> { + if (targetNode != null) { + builder.startObject("target_node"); + discoveryNodeToXContent(targetNode, true, builder); + builder.endObject(); + } + builder.field("can_remain_on_current_node", canRemain() ? 
"yes" : "no"); + if (canRemain() == false && canRemainDecision.getDecisions().isEmpty() == false) { + builder.startArray("can_remain_decisions"); + canRemainDecision.toXContent(builder, params); builder.endArray(); } - } - if (clusterRebalanceDecision != null) { - builder.field("can_rebalance_to_other_node", allocationDecision); - builder.field("rebalance_explanation", getExplanation()); - } else { - builder.field("can_move_to_other_node", forceMove() ? "yes" : "no"); - builder.field("move_explanation", getExplanation()); - } - nodeDecisionsToXContent(nodeDecisions, builder, params); - return builder; + if (clusterRebalanceDecision != null) { + AllocationDecision rebalanceDecision = AllocationDecision.fromDecisionType(clusterRebalanceDecision.type()); + builder.field("can_rebalance_cluster", rebalanceDecision); + if (rebalanceDecision != AllocationDecision.YES && clusterRebalanceDecision.getDecisions().isEmpty() == false) { + builder.startArray("can_rebalance_cluster_decisions"); + clusterRebalanceDecision.toXContent(builder, params); + builder.endArray(); + } + } + if (clusterRebalanceDecision != null) { + builder.field("can_rebalance_to_other_node", allocationDecision); + builder.field("rebalance_explanation", getExplanation()); + } else { + builder.field("can_move_to_other_node", forceMove() ? 
"yes" : "no"); + builder.field("move_explanation", getExplanation()); + } + return builder; + }), nodeDecisionsToXContentChunked(nodeDecisions)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java index 30d033ba5d431..7f184386ec367 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardAllocationDecision.java @@ -8,13 +8,16 @@ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xcontent.ToXContentFragment; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.xcontent.ToXContent; import java.io.IOException; +import java.util.Collections; +import java.util.Iterator; /** * Represents the decision taken for the allocation of a single shard. If @@ -29,7 +32,7 @@ * then both {@link #getAllocateDecision()} and {@link #getMoveDecision()} will return * objects whose {@code isDecisionTaken()} method returns {@code false}. 
*/ -public final class ShardAllocationDecision implements ToXContentFragment, Writeable { +public final class ShardAllocationDecision implements ChunkedToXContentObject, Writeable { public static final ShardAllocationDecision NOT_TAKEN = new ShardAllocationDecision( AllocateUnassignedDecision.NOT_TAKEN, MoveDecision.NOT_TAKEN @@ -82,14 +85,11 @@ public MoveDecision getMoveDecision() { } @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (allocateDecision.isDecisionTaken()) { - allocateDecision.toXContent(builder, params); - } - if (moveDecision.isDecisionTaken()) { - moveDecision.toXContent(builder, params); - } - return builder; + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + allocateDecision.isDecisionTaken() ? allocateDecision.toXContentChunked(params) : Collections.emptyIterator(), + moveDecision.isDecisionTaken() ? moveDecision.toXContentChunked(params) : Collections.emptyIterator() + ); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index 711f0c84136e7..a54130aec95b6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -117,7 +117,7 @@ public ClusterState execute(BatchExecutionContext cont assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) : "Node " + e.getKey() + " is in the cluster but does not have an associated transport version recorded"; if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { - builder.putTransportVersion(e.getKey(), e.getValue()); + builder.putCompatibilityVersions(e.getKey(), e.getValue(), Map.of()); // unknown mappings versions modified = true; } } diff --git 
a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java index 0f60dbff56cfa..1ff42b16252c8 100644 --- a/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java +++ b/server/src/main/java/org/elasticsearch/common/ReferenceDocs.java @@ -46,6 +46,28 @@ public enum ReferenceDocs { CONCURRENT_REPOSITORY_WRITERS, ARCHIVE_INDICES, HTTP_TRACER, + BOOTSTRAP_CHECK_HEAP_SIZE, + BOOTSTRAP_CHECK_FILE_DESCRIPTOR, + BOOTSTRAP_CHECK_MEMORY_LOCK, + BOOTSTRAP_CHECK_MAX_NUMBER_THREADS, + BOOTSTRAP_CHECK_MAX_FILE_SIZE, + BOOTSTRAP_CHECK_MAX_SIZE_VIRTUAL_MEMORY, + BOOTSTRAP_CHECK_MAXIMUM_MAP_COUNT, + BOOTSTRAP_CHECK_CLIENT_JVM, + BOOTSTRAP_CHECK_USE_SERIAL_COLLECTOR, + BOOTSTRAP_CHECK_SYSTEM_CALL_FILTER, + BOOTSTRAP_CHECK_ONERROR_AND_ONOUTOFMEMORYERROR, + BOOTSTRAP_CHECK_EARLY_ACCESS, + BOOTSTRAP_CHECK_G1GC, + BOOTSTRAP_CHECK_ALL_PERMISSION, + BOOTSTRAP_CHECK_DISCOVERY_CONFIGURATION, + BOOTSTRAP_CHECKS, + BOOTSTRAP_CHECK_ENCRYPT_SENSITIVE_DATA, + BOOTSTRAP_CHECK_PKI_REALM, + BOOTSTRAP_CHECK_ROLE_MAPPINGS, + BOOTSTRAP_CHECK_TLS, + BOOTSTRAP_CHECK_TOKEN_SSL, + BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP, // this comment keeps the ';' on the next line so every entry above has a trailing ',' which makes the diff for adding new links cleaner ; diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index d46e54de8729e..d2972985cee8e 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -32,8 +32,8 @@ import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.tasks.RawTaskStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import 
org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; diff --git a/server/src/main/java/org/elasticsearch/common/settings/RotatableSecret.java b/server/src/main/java/org/elasticsearch/common/settings/RotatableSecret.java new file mode 100644 index 0000000000000..b8bd1365dab04 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/settings/RotatableSecret.java @@ -0,0 +1,130 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; + +import java.time.Instant; +import java.util.concurrent.locks.StampedLock; + +/** + * A container for a {@link SecureString} that can be rotated with a grace period for the secret that has been rotated out. + * Once rotated the prior secret is available for a configured amount of time before it is invalidated. + * This allows for secret rotation without temporary failures or the need to tightly orchestrate + * multiple parties. This class is threadsafe, however it also assumes that reading secrets is frequent (i.e. every request) + * but rotation is rare (i.e. once a day). + */ +public class RotatableSecret { + private Secrets secrets; + private final StampedLock stampedLock = new StampedLock(); + + /** + * @param secret The secret to rotate. {@code null} if the secret is not configured. + */ + public RotatableSecret(@Nullable SecureString secret) { + this.secrets = new Secrets(Strings.hasText(secret) ? 
secret.clone() : null, null, Instant.EPOCH); + } + + /** + * Rotates the secret iff the new secret and current secret are different. If rotated, the current secret is moved to the prior secret + * which is valid for the given grace period and new secret is now considered the current secret. + * @param newSecret the secret to rotate in. + * @param gracePeriod the time period that the prior secret is valid. + */ + public void rotate(SecureString newSecret, TimeValue gracePeriod) { + long stamp = stampedLock.writeLock(); + try { + if (secrets.current == null || secrets.current.equals(newSecret) == false) { + secrets = new Secrets( + Strings.hasText(newSecret) ? newSecret.clone() : null, + secrets.current, + Instant.now().plusMillis(gracePeriod.getMillis()) + ); + } + } finally { + stampedLock.unlockWrite(stamp); + } + } + + /** + * @return true if the current or prior value has a non-null and a non-empty value + */ + public boolean isSet() { + checkExpired(); + return Strings.hasText(secrets.current) || Strings.hasText(secrets.prior); + } + + /** + * Check to see if the current or (non-expired) prior secret matches the passed in secret. + * @param secret The secret to match against. + * @return true if either the current or (non-expired) prior secret matches. + * false if neither matches. false if current and prior secret are unset. false if passed in secret is null or empty + */ + public boolean matches(SecureString secret) { + checkExpired(); + if ((Strings.hasText(secrets.current) == false && Strings.hasText(secrets.prior) == false) || Strings.hasText(secret) == false) { + return false; + } + return secrets.current.equals(secret) || (secrets.prior != null && secrets.prior.equals(secret)); + } + + // for testing only + Secrets getSecrets() { + return secrets; + } + + // for testing only + boolean isWriteLocked() { + return stampedLock.isWriteLocked(); + } + + /** + * Checks to see if the prior secret TTL has expired. If expired, evict from the backing data structure. 
Always call this before + * reading the secret(s). + */ + private void checkExpired() { + boolean needToUnlock = false; + long stamp = stampedLock.tryOptimisticRead(); + boolean expired = secrets.prior != null && secrets.priorValidTill.isBefore(Instant.now()); // optimistic read + if (stampedLock.validate(stamp) == false) { + // optimism failed...potentially block to obtain the read lock and try the read again + stamp = stampedLock.readLock(); + needToUnlock = true; + expired = secrets.prior != null && secrets.priorValidTill.isBefore(Instant.now()); // locked read + } + try { + if (expired) { + long stampUpgrade = stampedLock.tryConvertToWriteLock(stamp); + if (stampUpgrade == 0) { + // upgrade failed so we need to manually unlock the read lock and grab the write lock + if (needToUnlock) { + stampedLock.unlockRead(stamp); + } + stamp = stampedLock.writeLock(); + expired = secrets.prior != null && secrets.priorValidTill.isBefore(Instant.now()); // check again since we had to unlock + } else { + stamp = stampUpgrade; + } + needToUnlock = true; + if (expired) { + SecureString prior = secrets.prior; + secrets = new Secrets(secrets.current, null, Instant.EPOCH); + prior.close(); // zero out the memory + } + } + } finally { + if (needToUnlock) { // only unlock if we acquired a read or write lock + stampedLock.unlock(stamp); + } + } + } + + public record Secrets(SecureString current, SecureString prior, Instant priorValidTill) {}; +} diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index b59ebc00e55c4..7bfba1ebdb176 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -157,7 +157,7 @@ public StoredContext stashContextPreservingRequestHeaders(final String... 
reques } /** - * When using a {@link org.elasticsearch.tracing.Tracer} to capture activity in Elasticsearch, when a parent span is already + * When using a {@link org.elasticsearch.telemetry.tracing.Tracer} to capture activity in Elasticsearch, when a parent span is already * in progress, it is necessary to start a new context before beginning a child span. This method creates a context, * moving tracing-related fields to different names so that a new child span can be started. This child span will pick up * the moved fields and use them to establish the parent-child relationship. @@ -213,7 +213,7 @@ public boolean hasTraceContext() { } /** - * When using a {@link org.elasticsearch.tracing.Tracer}, sometimes you need to start a span completely unrelated + * When using a {@link org.elasticsearch.telemetry.tracing.Tracer}, sometimes you need to start a span completely unrelated * to any current span. In order to avoid any parent/child relationship being created, this method creates a new * context that clears all the tracing fields. 
* diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java index aaff9e249af0f..692693f9e04c8 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/ChunkedToXContentHelper.java @@ -30,6 +30,10 @@ public static Iterator endObject() { return Iterators.single(((builder, params) -> builder.endObject())); } + public static Iterator startArray() { + return Iterators.single(((builder, params) -> builder.startArray())); + } + public static Iterator startArray(String name) { return Iterators.single(((builder, params) -> builder.startArray(name))); } @@ -82,6 +86,12 @@ private static Iterator map(String name, Map map, Fun return wrapWithObject(name, Iterators.map(map.entrySet().iterator(), toXContent)); } + /** + * Creates an Iterator of a single ToXContent object that serializes all the given 'contents' ToXContent objects into a single chunk. + * + * @param contents ToXContent objects supporting toXContent() calls. + * @return Iterator of a single ToXContent object serializing all the ToXContent "contents". + */ public static Iterator singleChunk(ToXContent... 
contents) { return Iterators.single((builder, params) -> { for (ToXContent content : contents) { diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 2ffadfb26a985..b7826ad17add2 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -36,7 +36,7 @@ public static ClusterState setLocalNode( ) { return ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()) - .compatibilityVersions(Map.of(localNode.getId(), compatibilityVersions)) + .putCompatibilityVersions(localNode.getId(), compatibilityVersions) .build(); } diff --git a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java index a013cba390f6b..767b7bdfb643f 100644 --- a/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/elasticsearch/http/AbstractHttpServerTransport.java @@ -38,8 +38,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 6fc6e7eb3ffbc..b2d3afe30cc36 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -27,8 +27,8 @@ 
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.tracing.SpanId; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.Tracer; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 49e2172b2f4b2..1cb03574afd86 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -160,7 +160,7 @@ private static IndexVersion findCurrent() { if (versionExtension == null) { return LATEST_DEFINED; } - var version = versionExtension.getCurrentIndexVersion(); + var version = versionExtension.getCurrentIndexVersion(LATEST_DEFINED); assert version.onOrAfter(LATEST_DEFINED); assert version.luceneVersion.equals(Version.LATEST) diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java index efa99c5d3f9f1..ceb0884e4c098 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherMultiGeoPointIndexFieldData.java @@ -113,7 +113,7 @@ public boolean advanceExact(int doc) throws IOException { values.sort(Long::compare); iterator = values.iterator(); - return true; + return values.isEmpty() == false; } } } diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java index cc47e796008cc..b94c77f28cf09 100644 --- 
a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBinaryIndexFieldData.java @@ -117,7 +117,7 @@ public boolean advanceExact(int doc) throws IOException { iterator = values.iterator(); - return true; + return values.isEmpty() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java index 5cf486a35274c..fb6fde546eb69 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedBooleanIndexFieldData.java @@ -119,7 +119,7 @@ public boolean advanceExact(int doc) throws IOException { iteratorIndex = 0; - return true; + return (trueCount + falseCount) > 0; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java index c1659441f92d0..f5bdfd5e0d775 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedDoubleIndexFieldData.java @@ -118,7 +118,7 @@ public boolean advanceExact(int doc) throws IOException { values.sort(Double::compare); iterator = values.iterator(); - return true; + return values.isEmpty() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java index f3f9446a42af4..cbbe6db8d3bf7 
100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/SourceValueFetcherSortedNumericIndexFieldData.java @@ -119,7 +119,7 @@ public boolean advanceExact(int doc) throws IOException { values.sort(Long::compare); iterator = values.iterator(); - return true; + return values.isEmpty() == false; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 365d4f615e30c..91f2165f6b0d1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -74,7 +74,7 @@ public class DenseVectorFieldMapper extends FieldMapper { public static final IndexVersion LITTLE_ENDIAN_FLOAT_STORED_INDEX_VERSION = IndexVersion.V_8_9_0; public static final String CONTENT_TYPE = "dense_vector"; - public static short MAX_DIMS_COUNT = 2048; // maximum allowed number of dimensions + public static short MAX_DIMS_COUNT = 4096; // maximum allowed number of dimensions public static short MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING = 128; // minimum number of dims for floats to be dynamically mapped to vector public static final int MAGNITUDE_BYTES = 4; diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c4ef440a921e3..3adee7643990b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -3090,7 +3090,8 @@ public void startRecovery( PeerRecoveryTargetService.RecoveryListener recoveryListener, RepositoriesService repositoriesService, BiConsumer> mappingUpdateConsumer, - IndicesService indicesService + 
IndicesService indicesService, + long clusterStateVersion ) { // TODO: Create a proper object to encapsulate the recovery context // all of the current methods here follow a pattern of: @@ -3114,7 +3115,7 @@ public void startRecovery( case PEER -> { try { markAsRecovering("from " + recoveryState.getSourceNode(), recoveryState); - recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener); + recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), clusterStateVersion, recoveryListener); } catch (Exception e) { failShard("corrupted preexisting index", e); recoveryListener.onRecoveryFailure(new RecoveryFailedException(recoveryState, null, e), true); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index b34086ddb5b77..34e74c87aac94 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -879,7 +879,7 @@ public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetada } @Override - public IndexShard createShard( + public void createShard( final ShardRouting shardRouting, final PeerRecoveryTargetService recoveryTargetService, final PeerRecoveryTargetService.RecoveryListener recoveryListener, @@ -888,7 +888,8 @@ public IndexShard createShard( final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, - final DiscoveryNode sourceNode + final DiscoveryNode sourceNode, + long clusterStateVersion ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); ensureChangesAllowed(); @@ -911,8 +912,7 @@ public IndexShard createShard( .masterNodeTimeout(TimeValue.MAX_VALUE), new ThreadedActionListener<>(threadPool.generic(), listener.map(ignored -> null)) ); - }, this); - return indexShard; + }, this, clusterStateVersion); } @Override diff 
--git a/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java index 616f4f57abf06..5f5e1994995ea 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java @@ -12,9 +12,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; -import org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import java.util.List; import java.util.Map; @@ -89,7 +87,6 @@ public SystemDataStreamDescriptor( this.type = Objects.requireNonNull(type, "type must be specified"); this.composableIndexTemplate = Objects.requireNonNull(composableIndexTemplate, "composableIndexTemplate must be provided"); this.componentTemplates = componentTemplates == null ? 
Map.of() : Map.copyOf(componentTemplates); - validateNoDownsamplingConfigured(composableIndexTemplate, componentTemplates); this.allowedElasticProductOrigins = Objects.requireNonNull( allowedElasticProductOrigins, "allowedElasticProductOrigins must not be null" @@ -102,16 +99,6 @@ public SystemDataStreamDescriptor( this.characterRunAutomaton = new CharacterRunAutomaton(buildAutomaton(backingIndexPatternForDataStream(this.dataStreamName))); } - private void validateNoDownsamplingConfigured( - ComposableIndexTemplate composableIndexTemplate, - Map componentTemplates - ) { - DataStreamLifecycle resolvedLifecycle = MetadataIndexTemplateService.resolveLifecycle(composableIndexTemplate, componentTemplates); - if (resolvedLifecycle != null && resolvedLifecycle.isEnabled() && resolvedLifecycle.getDownsamplingRounds() != null) { - throw new IllegalArgumentException("System data streams do not support downsampling as part of their lifecycle configuration"); - } - } - public String getDataStreamName() { return dataStreamName; } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java index d6441a2920f43..98cea47a94a5d 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexDescriptor.java @@ -550,6 +550,27 @@ public MappingsVersion getMappingsVersion() { * @param cause the action being attempted that triggered the check. Used in the error message. * @return the standardized error message */ + public String getMinimumMappingsVersionMessage(String cause) { + Objects.requireNonNull(cause); + final MappingsVersion actualMinimumMappingsVersion = priorSystemIndexDescriptors.isEmpty() + ? 
getMappingsVersion() + : priorSystemIndexDescriptors.get(priorSystemIndexDescriptors.size() - 1).mappingsVersion; + return Strings.format( + "[%s] failed - system index [%s] requires all data and master nodes to have mappings versions at least of version [%s]", + cause, + this.getPrimaryIndex(), + actualMinimumMappingsVersion + ); + } + + /** + * Gets a standardized message when the node contains a data or master node whose version is less + * than that of the minimum supported version of this descriptor and its prior descriptors. + * + * @param cause the action being attempted that triggered the check. Used in the error message. + * @return the standardized error message + */ + @Deprecated public String getMinimumNodeVersionMessage(String cause) { Objects.requireNonNull(cause); final Version actualMinimumVersion = priorSystemIndexDescriptors.isEmpty() @@ -572,6 +593,7 @@ public String getMinimumNodeVersionMessage(String cause) { * @return null if the lowest node version is lower than the minimum version in this descriptor, * or the appropriate descriptor if the supplied version is acceptable. */ + @Deprecated public SystemIndexDescriptor getDescriptorCompatibleWith(Version version) { if (minimumNodeVersion.onOrBefore(version)) { return this; @@ -584,6 +606,26 @@ public SystemIndexDescriptor getDescriptorCompatibleWith(Version version) { return null; } + /** + * Finds the descriptor that can be used within this cluster, by comparing the supplied minimum + * node version to this descriptor's minimum version and the prior descriptors minimum version. + * + * @param version the lower node version in the cluster + * @return null if the lowest node version is lower than the minimum version in this descriptor, + * or the appropriate descriptor if the supplied version is acceptable. 
+ */ + public SystemIndexDescriptor getDescriptorCompatibleWith(MappingsVersion version) { + if (Objects.requireNonNull(version).version() >= mappingsVersion.version()) { + return this; + } + for (SystemIndexDescriptor prior : priorSystemIndexDescriptors) { + if (version.version() >= prior.mappingsVersion.version()) { + return prior; + } + } + return null; + } + /** * @return The names of thread pools that should be used for operations on this * system index. diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java index d1f8acfccc0ac..a0667db91daf6 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java @@ -12,7 +12,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.RefCountingRunnable; @@ -93,7 +92,7 @@ public void clusterChanged(ClusterChangedEvent event) { } // if we're in a mixed-version cluster, exit - if (state.nodes().getMaxNodeVersion().after(state.nodes().getSmallestNonClientNodeVersion())) { + if (state.hasMixedSystemIndexVersions()) { logger.debug("Skipping system indices up-to-date check as cluster has mixed versions"); return; } @@ -267,13 +266,13 @@ private static boolean checkIndexMappingUpToDate(SystemIndexDescriptor descripto return false; } - return Version.CURRENT.onOrBefore(readMappingVersion(descriptor, mappingMetadata)); + return descriptor.getMappingsVersion().version() <= readMappingVersion(descriptor, mappingMetadata); } /** * Fetches the mapping version from an index's mapping's `_meta` info. 
*/ - private static Version readMappingVersion(SystemIndexDescriptor descriptor, MappingMetadata mappingMetadata) { + private static int readMappingVersion(SystemIndexDescriptor descriptor, MappingMetadata mappingMetadata) { final String indexName = descriptor.getPrimaryIndex(); try { @SuppressWarnings("unchecked") @@ -286,28 +285,28 @@ private static Version readMappingVersion(SystemIndexDescriptor descriptor, Mapp ); // This can happen with old system indices, such as .watches, which were created before we had the convention of // storing a version under `_meta.` We should just replace the template to be sure. - return Version.V_EMPTY; + return -1; } - final Object rawVersion = meta.get(descriptor.getMappingsNodeVersionMetaKey()); - if (rawVersion instanceof Integer) { - // This can happen with old system indices, such as .tasks, which were created before we used an Elasticsearch - // version here. We should just replace the template to be sure. - return Version.V_EMPTY; - } - final String versionString = rawVersion != null ? 
rawVersion.toString() : null; - if (versionString == null) { + final Object rawVersion = meta.get(SystemIndexDescriptor.VERSION_META_KEY); + if (rawVersion == null) { logger.warn( "No value found in mappings for [_meta.{}], assuming mappings update required", - descriptor.getMappingsNodeVersionMetaKey() + SystemIndexDescriptor.VERSION_META_KEY + ); + return -1; + } + if (rawVersion instanceof Integer == false) { + logger.warn( + "Value in [_meta.{}] was not an integer, assuming mappings update required", + SystemIndexDescriptor.VERSION_META_KEY ); - // If we called `Version.fromString(null)`, it would return `Version.CURRENT` and we wouldn't update the mappings - return Version.V_EMPTY; + return -1; } - return Version.fromString(versionString); + return (int) rawVersion; } catch (ElasticsearchParseException | IllegalArgumentException e) { logger.error(() -> "Cannot parse the mapping for index [" + indexName + "]", e); - return Version.V_EMPTY; + return -1; } } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java index 0e8346f57ea1e..1ae53125ea938 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndices.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndices.java @@ -123,6 +123,13 @@ public class SystemIndices { new Feature(SYNONYMS_FEATURE_NAME, "Manages synonyms", List.of(SYNONYMS_DESCRIPTOR)) ).collect(Collectors.toUnmodifiableMap(Feature::getName, Function.identity())); + public static final Map SERVER_SYSTEM_MAPPINGS_VERSIONS = + SERVER_SYSTEM_FEATURE_DESCRIPTORS.values() + .stream() + .flatMap(feature -> feature.getIndexDescriptors().stream()) + .filter(SystemIndexDescriptor::isAutomaticallyManaged) + .collect(Collectors.toMap(SystemIndexDescriptor::getIndexPattern, SystemIndexDescriptor::getMappingsVersion)); + /** * The node's full list of system features is stored here. 
The map is keyed * on the value of {@link Feature#getName()}, and is used for fast lookup of diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index d05a5ea377520..39a302963d3d1 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -645,7 +645,8 @@ private void createShardWhenLockAvailable( this::updateGlobalCheckpointForShard, retentionLeaseSyncer, originalState.nodes().getLocalNode(), - sourceNode + sourceNode, + originalState.version() ); listener.onResponse(true); } catch (ShardLockObtainFailedException e) { @@ -1090,10 +1091,10 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex * @param retentionLeaseSyncer a callback when this shard syncs retention leases * @param targetNode the node where this shard will be recovered * @param sourceNode the source node to recover this shard from (it might be null) - * @return a new shard + * @param clusterStateVersion the cluster state version in which the shard was created * @throws IOException if an I/O exception occurs when creating the shard */ - T createShard( + void createShard( ShardRouting shardRouting, PeerRecoveryTargetService recoveryTargetService, PeerRecoveryTargetService.RecoveryListener recoveryListener, @@ -1102,7 +1103,8 @@ T createShard( GlobalCheckpointSyncer globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, DiscoveryNode targetNode, - @Nullable DiscoveryNode sourceNode + @Nullable DiscoveryNode sourceNode, + long clusterStateVersion ) throws IOException; /** diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceClusterStateDelay.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceClusterStateDelay.java new file mode 100644 index 
0000000000000..6610447d488c2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceClusterStateDelay.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.indices.recovery; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +import java.util.concurrent.Executor; +import java.util.function.Consumer; + +public class PeerRecoverySourceClusterStateDelay { + private PeerRecoverySourceClusterStateDelay() {} + + private static final Logger logger = LogManager.getLogger(PeerRecoverySourceClusterStateDelay.class); + + /** + * Waits for the given cluster state version to be applied locally before proceeding with recovery + */ + public static void ensureClusterStateVersion( + long clusterStateVersion, + ClusterService clusterService, + Executor executor, + ThreadContext threadContext, + ActionListener listener, + Consumer> proceedWithRecovery + ) { + if (clusterStateVersion <= clusterService.state().version()) { + // either our locally-applied cluster state is already fresh enough, or request.clusterStateVersion() == 0 for bwc + proceedWithRecovery.accept(listener); + } else { + logger.debug("delaying {} until application of cluster state version {}", proceedWithRecovery, clusterStateVersion); + final var 
waitListener = new SubscribableListener(); + final var clusterStateVersionListener = new ClusterStateListener() { + @Override + public void clusterChanged(ClusterChangedEvent event) { + if (clusterStateVersion <= event.state().version()) { + waitListener.onResponse(null); + } + } + + @Override + public String toString() { + return "ClusterStateListener for " + proceedWithRecovery; + } + }; + clusterService.addListener(clusterStateVersionListener); + waitListener.addListener(ActionListener.running(() -> clusterService.removeListener(clusterStateVersionListener))); + if (clusterStateVersion <= clusterService.state().version()) { + waitListener.onResponse(null); + } + waitListener.addListener( + listener.delegateFailureAndWrap((l, ignored) -> proceedWithRecovery.accept(l)), + executor, + threadContext + ); + // NB no timeout. If we never apply the fresh cluster state then eventually we leave the cluster which removes the recovery + // from the routing table so the target shard will fail. + } + } +} diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index eac119f920f6a..fbbd0655e9f10 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.core.Nullable; @@ -43,6 +42,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.function.Consumer; /** * The source recovery accepts recovery 
requests from other peer shards and start the recovery process from this @@ -59,20 +59,22 @@ public static class Actions { private final TransportService transportService; private final IndicesService indicesService; + private final ClusterService clusterService; private final RecoverySettings recoverySettings; private final RecoveryPlannerService recoveryPlannerService; final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries(); - @Inject public PeerRecoverySourceService( TransportService transportService, IndicesService indicesService, + ClusterService clusterService, RecoverySettings recoverySettings, RecoveryPlannerService recoveryPlannerService ) { this.transportService = transportService; this.indicesService = indicesService; + this.clusterService = clusterService; this.recoverySettings = recoverySettings; this.recoveryPlannerService = recoveryPlannerService; // When the target node wants to start a peer recovery it sends a START_RECOVERY request to the source @@ -132,6 +134,27 @@ public void clusterChanged(ClusterChangedEvent event) { } private void recover(StartRecoveryRequest request, Task task, ActionListener listener) { + PeerRecoverySourceClusterStateDelay.ensureClusterStateVersion( + request.clusterStateVersion(), + clusterService, + transportService.getThreadPool().generic(), + transportService.getThreadPool().getThreadContext(), + listener, + new Consumer<>() { + @Override + public void accept(ActionListener l) { + recoverWithFreshClusterState(request, task, l); + } + + @Override + public String toString() { + return "recovery [" + request + "]"; + } + } + ); + } + + private void recoverWithFreshClusterState(StartRecoveryRequest request, Task task, ActionListener listener) { final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); final IndexShard shard = indexService.getShard(request.shardId().id()); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java 
b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index b3b23ac14d158..2cdd383114497 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -228,12 +228,18 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh } } - public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { + public void startRecovery( + final IndexShard indexShard, + final DiscoveryNode sourceNode, + final long clusterStateVersion, + final RecoveryListener listener + ) { final Releasable snapshotFileDownloadsPermit = tryAcquireSnapshotDownloadPermits(); // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery( indexShard, sourceNode, + clusterStateVersion, snapshotFilesProvider, listener, recoverySettings.activityTimeout(), @@ -319,7 +325,8 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi recoveryId, indexShard.shardId(), transportService.getLocalNode(), - indexShard.routingEntry().allocationId().getId() + indexShard.routingEntry().allocationId().getId(), + recoveryTarget.clusterStateVersion() ), new ActionListener<>() { @Override @@ -455,6 +462,7 @@ public static StartRecoveryRequest getStartRecoveryRequest( recoveryTarget.indexShard().routingEntry().allocationId().getId(), recoveryTarget.sourceNode(), localNode, + recoveryTarget.clusterStateVersion(), metadataSnapshot, recoveryTarget.state().getPrimary(), recoveryTarget.recoveryId(), diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index cb73d104078dc..0eace1b8fb220 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ 
b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -54,6 +54,7 @@ public RecoveriesCollection(Logger logger, ThreadPool threadPool) { public long startRecovery( IndexShard indexShard, DiscoveryNode sourceNode, + long clusterStateVersion, SnapshotFilesProvider snapshotFilesProvider, PeerRecoveryTargetService.RecoveryListener listener, TimeValue activityTimeout, @@ -62,6 +63,7 @@ public long startRecovery( RecoveryTarget recoveryTarget = new RecoveryTarget( indexShard, sourceNode, + clusterStateVersion, snapshotFilesProvider, snapshotFileDownloadsPermit, listener diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java index aace3d35efb53..4f0d3b7d798cc 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java @@ -70,6 +70,7 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget private final long recoveryId; private final IndexShard indexShard; private final DiscoveryNode sourceNode; + private final long clusterStateVersion; private final SnapshotFilesProvider snapshotFilesProvider; private volatile MultiFileWriter multiFileWriter; private final RecoveryRequestTracker requestTracker = new RecoveryRequestTracker(); @@ -94,16 +95,18 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget /** * Creates a new recovery target object that represents a recovery to the provided shard. * - * @param indexShard local shard where we want to recover to - * @param sourceNode source node of the recovery where we recover from - * @param snapshotFileDownloadsPermit a permit that allows to download files from a snapshot, - * limiting the concurrent snapshot file downloads per node - * preventing the exhaustion of repository resources. 
- * @param listener called when recovery is completed/failed + * @param indexShard local shard where we want to recover to + * @param sourceNode source node of the recovery where we recover from + * @param clusterStateVersion version of the cluster state that initiated the recovery + * @param snapshotFileDownloadsPermit a permit that allows to download files from a snapshot, + * limiting the concurrent snapshot file downloads per node + * preventing the exhaustion of repository resources. + * @param listener called when recovery is completed/failed */ public RecoveryTarget( IndexShard indexShard, DiscoveryNode sourceNode, + long clusterStateVersion, SnapshotFilesProvider snapshotFilesProvider, @Nullable Releasable snapshotFileDownloadsPermit, PeerRecoveryTargetService.RecoveryListener listener @@ -114,6 +117,7 @@ public RecoveryTarget( this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); this.indexShard = indexShard; this.sourceNode = sourceNode; + this.clusterStateVersion = clusterStateVersion; this.snapshotFilesProvider = snapshotFilesProvider; this.snapshotFileDownloadsPermit = snapshotFileDownloadsPermit; this.shardId = indexShard.shardId(); @@ -149,7 +153,14 @@ public RecoveryTarget retryCopy() { // get released after the retry copy is created Releasable snapshotFileDownloadsPermitCopy = snapshotFileDownloadsPermit; snapshotFileDownloadsPermit = null; - return new RecoveryTarget(indexShard, sourceNode, snapshotFilesProvider, snapshotFileDownloadsPermitCopy, listener); + return new RecoveryTarget( + indexShard, + sourceNode, + clusterStateVersion, + snapshotFilesProvider, + snapshotFileDownloadsPermitCopy, + listener + ); } @Nullable @@ -174,6 +185,10 @@ public DiscoveryNode sourceNode() { return this.sourceNode; } + public long clusterStateVersion() { + return clusterStateVersion; + } + public RecoveryState state() { return indexShard.recoveryState(); } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java 
b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java index 8ed47b448b749..4ace9ab1bc28d 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/StartRecoveryRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -29,6 +30,7 @@ public class StartRecoveryRequest extends TransportRequest { private final String targetAllocationId; private final DiscoveryNode sourceNode; private final DiscoveryNode targetNode; + private final long clusterStateVersion; private final Store.MetadataSnapshot metadataSnapshot; private final boolean primaryRelocation; private final long startingSeqNo; @@ -41,6 +43,11 @@ public StartRecoveryRequest(StreamInput in) throws IOException { targetAllocationId = in.readString(); sourceNode = new DiscoveryNode(in); targetNode = new DiscoveryNode(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED)) { + clusterStateVersion = in.readVLong(); + } else { + clusterStateVersion = 0L; // bwc: do not wait for cluster state to be applied + } metadataSnapshot = Store.MetadataSnapshot.readFrom(in); primaryRelocation = in.readBoolean(); startingSeqNo = in.readLong(); @@ -58,6 +65,7 @@ public StartRecoveryRequest(StreamInput in) throws IOException { * @param targetAllocationId the allocation id of the target shard * @param sourceNode the source node to remover from * @param targetNode the target node to recover to + * @param clusterStateVersion the cluster state version which initiated the recovery * @param metadataSnapshot the Lucene metadata * @param primaryRelocation whether or not the recovery is a primary relocation * @param recoveryId the recovery ID @@ -69,12 
+77,14 @@ public StartRecoveryRequest( final String targetAllocationId, final DiscoveryNode sourceNode, final DiscoveryNode targetNode, + final long clusterStateVersion, final Store.MetadataSnapshot metadataSnapshot, final boolean primaryRelocation, final long recoveryId, final long startingSeqNo, final boolean canDownloadSnapshotFiles ) { + this.clusterStateVersion = clusterStateVersion; this.recoveryId = recoveryId; this.shardId = shardId; this.targetAllocationId = targetAllocationId; @@ -108,6 +118,10 @@ public DiscoveryNode targetNode() { return targetNode; } + public long clusterStateVersion() { + return clusterStateVersion; + } + public boolean isPrimaryRelocation() { return primaryRelocation; } @@ -129,11 +143,13 @@ public String getDescription() { return Strings.format( """ recovery of %s to %s \ - [recoveryId=%d, targetAllocationId=%s, startingSeqNo=%d, primaryRelocation=%s, canDownloadSnapshotFiles=%s]""", + [recoveryId=%d, targetAllocationId=%s, clusterStateVersion=%d, startingSeqNo=%d, \ + primaryRelocation=%s, canDownloadSnapshotFiles=%s]""", shardId, targetNode.descriptionWithoutAttributes(), recoveryId, targetAllocationId, + clusterStateVersion, startingSeqNo, primaryRelocation, canDownloadSnapshotFiles @@ -148,6 +164,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(targetAllocationId); sourceNode.writeTo(out); targetNode.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED)) { + out.writeVLong(clusterStateVersion); + } // else bwc: just omit it, the receiver doesn't wait for a cluster state anyway metadataSnapshot.writeTo(out); out.writeBoolean(primaryRelocation); out.writeLong(startingSeqNo); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationAction.java b/server/src/main/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationAction.java index 1c40d40456014..eed6a1d02ae16 100644 --- 
a/server/src/main/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationAction.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -33,12 +34,14 @@ public static class Request extends ActionRequest { private final ShardId shardId; private final DiscoveryNode targetNode; private final String targetAllocationId; + private final long clusterStateVersion; - public Request(long recoveryId, ShardId shardId, DiscoveryNode targetNode, String targetAllocationId) { + public Request(long recoveryId, ShardId shardId, DiscoveryNode targetNode, String targetAllocationId, long clusterStateVersion) { this.recoveryId = recoveryId; this.shardId = shardId; this.targetNode = targetNode; this.targetAllocationId = targetAllocationId; + this.clusterStateVersion = clusterStateVersion; } public Request(StreamInput in) throws IOException { @@ -47,6 +50,11 @@ public Request(StreamInput in) throws IOException { shardId = new ShardId(in); targetNode = new DiscoveryNode(in); targetAllocationId = in.readString(); + if (in.getTransportVersion().onOrAfter(TransportVersions.WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED)) { + clusterStateVersion = in.readVLong(); + } else { + clusterStateVersion = 0L; // temporary bwc: do not wait for cluster state to be applied + } } @Override @@ -61,6 +69,9 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); targetNode.writeTo(out); out.writeString(targetAllocationId); + if (out.getTransportVersion().onOrAfter(TransportVersions.WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED)) { + out.writeVLong(clusterStateVersion); + } // temporary bwc: just omit it, the receiver doesn't wait for a cluster state anyway } public 
long recoveryId() { @@ -79,6 +90,10 @@ public String targetAllocationId() { return targetAllocationId; } + public long clusterStateVersion() { + return clusterStateVersion; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -87,12 +102,13 @@ public boolean equals(Object o) { return recoveryId == request.recoveryId && shardId.equals(request.shardId) && targetNode.equals(request.targetNode) - && targetAllocationId.equals(request.targetAllocationId); + && targetAllocationId.equals(request.targetAllocationId) + && clusterStateVersion == request.clusterStateVersion; } @Override public int hashCode() { - return Objects.hash(recoveryId, shardId, targetNode, targetAllocationId); + return Objects.hash(recoveryId, shardId, targetNode, targetAllocationId, clusterStateVersion); } } } diff --git a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java index 83974b3b65158..9c1b515df4043 100644 --- a/server/src/main/java/org/elasticsearch/internal/VersionExtension.java +++ b/server/src/main/java/org/elasticsearch/internal/VersionExtension.java @@ -18,14 +18,16 @@ public interface VersionExtension { /** * Returns the {@link TransportVersion} that Elasticsearch should use. *

- * This must be at least equal to the latest version found in {@link TransportVersion} V_* constants. + * This must be at least as high as the given fallback. + * @param fallback The latest transport version from server */ - TransportVersion getCurrentTransportVersion(); + TransportVersion getCurrentTransportVersion(TransportVersion fallback); /** * Returns the {@link IndexVersion} that Elasticsearch should use. *

- * This must be at least equal to the latest version found in {@link IndexVersion} V_* constants. + * This must be at least as high as the given fallback. + * @param fallback The latest index version from server */ - IndexVersion getCurrentIndexVersion(); + IndexVersion getCurrentIndexVersion(IndexVersion fallback); } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 1ae3aaa9e09db..198abe36e1452 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -213,9 +213,9 @@ import org.elasticsearch.tasks.TaskCancellationService; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.tasks.TaskResultsService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; @@ -916,7 +916,6 @@ protected Node( repositoryService, clusterModule.getAllocationService(), metadataCreateIndexService, - clusterModule.getMetadataDeleteIndexService(), indexMetadataVerifier, shardLimitValidator, systemIndices, @@ -1107,7 +1106,13 @@ protected Node( final SnapshotFilesProvider snapshotFilesProvider = new SnapshotFilesProvider(repositoryService); b.bind(PeerRecoverySourceService.class) .toInstance( - new PeerRecoverySourceService(transportService, indicesService, recoverySettings, recoveryPlannerService) + new PeerRecoverySourceService( + transportService, + indicesService, + clusterService, + recoverySettings, + recoveryPlannerService + ) ); b.bind(PeerRecoveryTargetService.class) .toInstance( diff --git a/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java 
b/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java index 28f3a778c01a0..a911cec220f60 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ClusterCoordinationPlugin.java @@ -77,17 +77,6 @@ CoordinationState.PersistedState createPersistedState( } interface PersistedClusterStateServiceFactory { - - @Deprecated(forRemoval = true) - default PersistedClusterStateService newPersistedClusterStateService( - NodeEnvironment nodeEnvironment, - NamedXContentRegistry xContentRegistry, - ClusterSettings clusterSettings, - ThreadPool threadPool - ) { - throw new AssertionError("Should not be called!"); - } - PersistedClusterStateService newPersistedClusterStateService( NodeEnvironment nodeEnvironment, NamedXContentRegistry xContentRegistry, diff --git a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java index 6a252a25ce553..bdedab5b990fd 100644 --- a/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/NetworkPlugin.java @@ -17,8 +17,8 @@ import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/plugins/Plugin.java b/server/src/main/java/org/elasticsearch/plugins/Plugin.java index b736682b72bcf..83e620aa30d12 100644 --- a/server/src/main/java/org/elasticsearch/plugins/Plugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/Plugin.java @@ -26,9 +26,9 @@ 
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; diff --git a/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java b/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java index 3e5cddc28e3b0..eccc38cf9f5c9 100644 --- a/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/TracerPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.plugins; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.Tracer; public interface TracerPlugin { Tracer getTracer(Settings settings); diff --git a/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java index efabc85268acc..35badffe0b3aa 100644 --- a/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/interceptor/RestServerActionPlugin.java @@ -15,7 +15,7 @@ import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.usage.UsageService; import java.util.function.UnaryOperator; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java 
b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c0ef6581db94b..bfa4cc5be7863 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.action.support.ListenableActionFuture; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; @@ -68,6 +67,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.ChunkedToXContent; @@ -1788,36 +1788,41 @@ public void getRepositoryData(ActionListener listener) { // master-eligible or not. 
assert clusterService.localNode().isMasterNode() : "should only load repository data on master nodes"; - if (lifecycle.started() == false) { - listener.onFailure(notStartedException()); - return; - } + while (true) { + // retry loop, in case the state changes underneath us somehow - if (latestKnownRepoGen.get() == RepositoryData.CORRUPTED_REPO_GEN) { - listener.onFailure(corruptedStateException(null, null)); - return; - } - final RepositoryData cached = latestKnownRepositoryData.get(); - // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with - // the latest known repository generation - if (bestEffortConsistency == false && cached.getGenId() == latestKnownRepoGen.get()) { - listener.onResponse(cached); - return; - } - if (metadata.generation() == RepositoryData.UNKNOWN_REPO_GEN && isReadOnly() == false) { - logger.debug( - "[{}] loading repository metadata for the first time, trying to determine correct generation and to store " - + "it in the cluster state", - metadata.name() - ); - initializeRepoGenerationTracking(listener); - } else { - logger.trace( - "[{}] loading un-cached repository data with best known repository generation [{}]", - metadata.name(), - latestKnownRepoGen - ); - repoDataLoadDeduplicator.execute(listener); + if (lifecycle.started() == false) { + listener.onFailure(notStartedException()); + return; + } + + if (latestKnownRepoGen.get() == RepositoryData.CORRUPTED_REPO_GEN) { + listener.onFailure(corruptedStateException(null, null)); + return; + } + final RepositoryData cached = latestKnownRepositoryData.get(); + // Fast path loading repository data directly from cache if we're in fully consistent mode and the cache matches up with + // the latest known repository generation + if (bestEffortConsistency == false && cached.getGenId() == latestKnownRepoGen.get()) { + listener.onResponse(cached); + return; + } + if (metadata.generation() == RepositoryData.UNKNOWN_REPO_GEN && 
isReadOnly() == false) { + logger.debug(""" + [{}] loading repository metadata for the first time, trying to determine correct generation and to store it in the \ + cluster state""", metadata.name()); + if (initializeRepoGenerationTracking(listener)) { + return; + } // else there was a concurrent modification, retry from the start + } else { + logger.trace( + "[{}] loading un-cached repository data with best known repository generation [{}]", + metadata.name(), + latestKnownRepoGen + ); + repoDataLoadDeduplicator.execute(listener); + return; + } } } @@ -1826,7 +1831,8 @@ private RepositoryException notStartedException() { } // Listener used to ensure that repository data is only initialized once in the cluster state by #initializeRepoGenerationTracking - private ListenableActionFuture repoDataInitialized; + @Nullable // unless we're in the process of initializing repo-generation tracking + private SubscribableListener repoDataInitialized; /** * Method used to set the current repository generation in the cluster state's {@link RepositoryMetadata} to the latest generation that @@ -1835,103 +1841,120 @@ private RepositoryException notStartedException() { * have a consistent view of the {@link RepositoryData} before any data has been written to the repository. * * @param listener listener to resolve with new repository data + * @return {@code true} if this method at least started the initialization process successfully and will eventually complete the + * listener, {@code false} if there was some concurrent state change which prevents us from starting repo generation tracking (typically + * that some other node got there first) and the caller should check again and possibly retry or complete the listener in some other + * way. 
*/ - private void initializeRepoGenerationTracking(ActionListener listener) { + private boolean initializeRepoGenerationTracking(ActionListener listener) { + final SubscribableListener listenerToSubscribe; + final ActionListener listenerToComplete; + synchronized (this) { if (repoDataInitialized == null) { - // double check the generation since we checked it outside the mutex in the caller and it could have changed by a + // double-check the generation since we checked it outside the mutex in the caller and it could have changed by a // concurrent initialization of the repo metadata and just load repository normally in case we already finished the // initialization if (metadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { - getRepositoryData(listener); - return; + return false; // retry } logger.trace("[{}] initializing repository generation in cluster state", metadata.name()); - repoDataInitialized = new ListenableActionFuture<>(); - repoDataInitialized.addListener(listener); - final Consumer onFailure = e -> { - logger.warn( - () -> format("[%s] Exception when initializing repository generation in cluster state", metadata.name()), - e - ); - final ActionListener existingListener; - synchronized (BlobStoreRepository.this) { - existingListener = repoDataInitialized; - repoDataInitialized = null; + repoDataInitialized = listenerToSubscribe = new SubscribableListener<>(); + listenerToComplete = new ActionListener<>() { + private ActionListener acquireAndClearRepoDataInitialized() { + synchronized (BlobStoreRepository.this) { + assert repoDataInitialized == listenerToSubscribe; + repoDataInitialized = null; + return listenerToSubscribe; + } } - existingListener.onFailure(e); - }; - repoDataLoadDeduplicator.execute( - ActionListener.wrap( - repoData -> submitUnbatchedTask( - "set initial safe repository generation [" + metadata.name() + "][" + repoData.getGenId() + "]", - new ClusterStateUpdateTask() { - @Override - public ClusterState execute(ClusterState 
currentState) { - RepositoryMetadata metadata = getRepoMetadata(currentState); - // No update to the repository generation should have occurred concurrently in general except - // for - // extreme corner cases like failing over to an older version master node and back to the - // current - // node concurrently - if (metadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { - throw new RepositoryException( - metadata.name(), - "Found unexpected initialized repo metadata [" + metadata + "]" - ); - } - return ClusterState.builder(currentState) - .metadata( - Metadata.builder(currentState.getMetadata()) - .putCustom( - RepositoriesMetadata.TYPE, - RepositoriesMetadata.get(currentState) - .withUpdatedGeneration(metadata.name(), repoData.getGenId(), repoData.getGenId()) - ) - ) - .build(); - } - @Override - public void onFailure(Exception e) { - onFailure.accept(e); - } + @Override + public void onResponse(RepositoryData repositoryData) { + acquireAndClearRepoDataInitialized().onResponse(repositoryData); + } - @Override - public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - logger.trace( - "[{}] initialized repository generation in cluster state to [{}]", - metadata.name(), - repoData.getGenId() - ); - // Resolve listeners on generic pool since some callbacks for repository data do additional IO - threadPool.generic().execute(() -> { - final ActionListener existingListener; - synchronized (BlobStoreRepository.this) { - existingListener = repoDataInitialized; - repoDataInitialized = null; - } - existingListener.onResponse(repoData); - logger.trace( - "[{}] called listeners after initializing repository to generation [{}]", - metadata.name(), - repoData.getGenId() - ); - }); - } - } - ), - onFailure - ) - ); + @Override + public void onFailure(Exception e) { + logger.warn( + () -> format("[%s] Exception when initializing repository generation in cluster state", metadata.name()), + e + ); + acquireAndClearRepoDataInitialized().onFailure(e); 
+ } + }; } else { logger.trace( "[{}] waiting for existing initialization of repository metadata generation in cluster state", metadata.name() ); - repoDataInitialized.addListener(listener); - } + listenerToComplete = null; + listenerToSubscribe = repoDataInitialized; + } + } + + if (listenerToComplete != null) { + SubscribableListener + // load the current repository data + .newForked(repoDataLoadDeduplicator::execute) + // write its generation to the cluster state + .andThen( + (l, repoData) -> submitUnbatchedTask( + "set initial safe repository generation [" + metadata.name() + "][" + repoData.getGenId() + "]", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return getClusterStateWithUpdatedRepositoryGeneration(currentState, repoData); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { + l.onResponse(repoData); + } + } + ) + ) + // fork to generic pool since we're on the applier thread and some callbacks for repository data do additional IO + .andThen((l, repoData) -> { + logger.trace("[{}] initialized repository generation in cluster state to [{}]", metadata.name(), repoData.getGenId()); + threadPool.generic().execute(ActionRunnable.supply(ActionListener.runAfter(l, () -> { + logger.trace( + "[{}] called listeners after initializing repository to generation [{}]", + metadata.name(), + repoData.getGenId() + ); + }), () -> repoData)); + }) + // and finally complete the listener + .addListener(listenerToComplete); } + + listenerToSubscribe.addListener(listener, EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.getThreadContext()); + return true; + } + + private ClusterState getClusterStateWithUpdatedRepositoryGeneration(ClusterState currentState, RepositoryData repoData) { + // In theory we might have failed over to a different master which initialized the repo and then failed back to 
this node, so we + // must check the repository generation in the cluster state is still unknown here. + final RepositoryMetadata repoMetadata = getRepoMetadata(currentState); + if (repoMetadata.generation() != RepositoryData.UNKNOWN_REPO_GEN) { + throw new RepositoryException(repoMetadata.name(), "Found unexpected initialized repo metadata [" + repoMetadata + "]"); + } + return ClusterState.builder(currentState) + .metadata( + Metadata.builder(currentState.getMetadata()) + .putCustom( + RepositoriesMetadata.TYPE, + RepositoriesMetadata.get(currentState) + .withUpdatedGeneration(repoMetadata.name(), repoData.getGenId(), repoData.getGenId()) + ) + ) + .build(); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index dea13af5383f7..c5dd4acd33aa0 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -30,7 +30,7 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.usage.SearchUsageHolder; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 88fd1b610ba1d..896c341953e73 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -9,17 +9,12 @@ package org.elasticsearch.rest.action.admin.cluster; import 
org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; -import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestBuilderListener; -import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -63,14 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); - return channel -> client.admin() - .cluster() - .allocationExplain(req, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(ClusterAllocationExplainResponse response, XContentBuilder builder) throws IOException { - response.getExplanation().toXContent(builder, ToXContent.EMPTY_PARAMS); - return new RestResponse(RestStatus.OK, builder); - } - }); + return channel -> client.admin().cluster().allocationExplain(req, new RestChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/script/field/BaseKeywordDocValuesField.java b/server/src/main/java/org/elasticsearch/script/field/BaseKeywordDocValuesField.java index f88804662ee6f..d53a2a4a0c78a 100644 --- a/server/src/main/java/org/elasticsearch/script/field/BaseKeywordDocValuesField.java +++ b/server/src/main/java/org/elasticsearch/script/field/BaseKeywordDocValuesField.java @@ -132,4 +132,8 @@ public String next() { } 
}; } + + public SortedBinaryDocValues getInput() { + return input; + } } diff --git a/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java b/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java index 79a4c3fa1b2ee..d18ae16746819 100644 --- a/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java +++ b/server/src/main/java/org/elasticsearch/script/field/vectors/DenseVector.java @@ -19,7 +19,7 @@ * 1) float[], this is for the ScoreScriptUtils class bindings which have converted a List based query vector into an array * 2) List, A painless script will typically use Lists since they are easy to pass as params and have an easy * literal syntax. Working with Lists directly, instead of converting to a float[], trades off runtime operations against - * memory pressure. Dense Vectors may have high dimensionality, up to 2048. Allocating a float[] per doc per script API + * memory pressure. Dense Vectors may have high dimensionality, up to 4096. Allocating a float[] per doc per script API * call is prohibitively expensive. * 3) Object, the whitelisted method for the painless API. Calls into the float[] or List version based on the class of the argument and checks dimensionality. 
diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 713fa4fa6c3e1..0de6cb133bca3 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -114,11 +114,11 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.Transports; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java index a71d26061752e..9ac9c0e241566 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -233,7 +233,8 @@ private long getBackgroundFrequency(Query query) throws IOException { if (backgroundFilter != null) { query = new BooleanQuery.Builder().add(query, Occur.FILTER).add(backgroundFilter, Occur.FILTER).build(); } - return context.searcher().count(query); + // use a brand new index searcher as we want to run this query on the current thread + return new IndexSearcher(context.searcher().getIndexReader()).count(query); } private TermsEnum getTermsEnum(String field) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java 
b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 589163ab00581..025a1840c04d9 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -175,8 +175,6 @@ public class RestoreService implements ClusterStateApplier { private final IndexMetadataVerifier indexMetadataVerifier; - private final MetadataDeleteIndexService metadataDeleteIndexService; - private final ShardLimitValidator shardLimitValidator; private final ClusterSettings clusterSettings; @@ -196,7 +194,6 @@ public RestoreService( RepositoriesService repositoriesService, AllocationService allocationService, MetadataCreateIndexService createIndexService, - MetadataDeleteIndexService metadataDeleteIndexService, IndexMetadataVerifier indexMetadataVerifier, ShardLimitValidator shardLimitValidator, SystemIndices systemIndices, @@ -209,7 +206,6 @@ public RestoreService( this.allocationService = allocationService; this.createIndexService = createIndexService; this.indexMetadataVerifier = indexMetadataVerifier; - this.metadataDeleteIndexService = metadataDeleteIndexService; if (DiscoveryNode.isMasterNode(clusterService.getSettings())) { clusterService.addStateApplier(this); } @@ -481,6 +477,7 @@ private void startRestore( metadataBuilder.dataStreams(dataStreamsToRestore, dataStreamAliasesToRestore).build(), dataStreamsToRestore.values(), updater, + clusterService.getSettings(), listener ) ); @@ -1208,6 +1205,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { private final BiConsumer updater; private final AllocationActionListener listener; + private final Settings settings; @Nullable private RestoreInfo restoreInfo; @@ -1221,6 +1219,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { Metadata metadata, Collection dataStreamsToRestore, BiConsumer updater, + Settings settings, ActionListener listener ) { 
super(request.masterNodeTimeout()); @@ -1232,6 +1231,7 @@ private final class RestoreSnapshotStateTask extends ClusterStateUpdateTask { this.metadata = metadata; this.dataStreamsToRestore = dataStreamsToRestore; this.updater = updater; + this.settings = settings; this.listener = new AllocationActionListener<>(listener, threadPool.getThreadContext()); } @@ -1241,9 +1241,10 @@ public ClusterState execute(ClusterState currentState) { ensureSnapshotNotDeleted(currentState); // Clear out all existing indices which fall within a system index pattern being restored - currentState = metadataDeleteIndexService.deleteIndices( + currentState = MetadataDeleteIndexService.deleteIndices( currentState, - resolveSystemIndicesToDelete(currentState, featureStatesToRestore) + resolveSystemIndicesToDelete(currentState, featureStatesToRestore), + settings ); // List of searchable snapshots indices to restore diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 114f4ded2cc9d..620e0e44f95e9 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -30,8 +30,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.TaskTransportChannel; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransportChannel; diff --git a/server/src/main/java/org/elasticsearch/tracing/SpanId.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java similarity index 96% rename from server/src/main/java/org/elasticsearch/tracing/SpanId.java rename to server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java index 
c91dc4b1080d1..8a22102baadf9 100644 --- a/server/src/main/java/org/elasticsearch/tracing/SpanId.java +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.tracing; +package org.elasticsearch.telemetry.tracing; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; diff --git a/server/src/main/java/org/elasticsearch/tracing/Tracer.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java similarity index 99% rename from server/src/main/java/org/elasticsearch/tracing/Tracer.java rename to server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java index 5d2b2f3c4ea63..ee0b3737abbb0 100644 --- a/server/src/main/java/org/elasticsearch/tracing/Tracer.java +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.tracing; +package org.elasticsearch.telemetry.tracing; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; diff --git a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 09581fcc4f1e3..fbc1dbdf6c8fc 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -15,7 +15,7 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.tracing.Tracer; +import org.elasticsearch.telemetry.tracing.Tracer; import java.io.IOException; import java.util.concurrent.Executor; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 5369b9a9eec13..7b1ca8d141c85 100644 
--- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -42,9 +42,9 @@ import org.elasticsearch.node.ReportingService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.io.IOException; import java.io.UncheckedIOException; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index 4de327d203d16..b162aa5b6c31a 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -6,5 +6,27 @@ "SHARD_LOCK_TROUBLESHOOTING": "troubleshooting-unstable-cluster.html#_diagnosing_shardlockobtainfailedexception_failures_2", "CONCURRENT_REPOSITORY_WRITERS": "add-repository.html", "ARCHIVE_INDICES": "archive-indices.html", - "HTTP_TRACER": "modules-network.html#http-rest-request-tracer" + "HTTP_TRACER": "modules-network.html#http-rest-request-tracer", + "BOOTSTRAP_CHECK_HEAP_SIZE": "_heap_size_check.html", + "BOOTSTRAP_CHECK_FILE_DESCRIPTOR": "_file_descriptor_check.html", + "BOOTSTRAP_CHECK_MEMORY_LOCK": "_memory_lock_check.html", + "BOOTSTRAP_CHECK_MAX_NUMBER_THREADS": "max-number-threads-check.html", + "BOOTSTRAP_CHECK_MAX_FILE_SIZE": "_max_file_size_check.html", + "BOOTSTRAP_CHECK_MAX_SIZE_VIRTUAL_MEMORY": "max-size-virtual-memory-check.html", + "BOOTSTRAP_CHECK_MAXIMUM_MAP_COUNT": "_maximum_map_count_check.html", + "BOOTSTRAP_CHECK_CLIENT_JVM": "_client_jvm_check.html", + "BOOTSTRAP_CHECK_USE_SERIAL_COLLECTOR": "_use_serial_collector_check.html", + "BOOTSTRAP_CHECK_SYSTEM_CALL_FILTER": "_system_call_filter_check.html", + 
"BOOTSTRAP_CHECK_ONERROR_AND_ONOUTOFMEMORYERROR": "_onerror_and_onoutofmemoryerror_checks.html", + "BOOTSTRAP_CHECK_EARLY_ACCESS": "_early_access_check.html", + "BOOTSTRAP_CHECK_G1GC": "_g1gc_check.html", + "BOOTSTRAP_CHECK_ALL_PERMISSION": "_all_permission_check.html", + "BOOTSTRAP_CHECK_DISCOVERY_CONFIGURATION": "_discovery_configuration_check.html", + "BOOTSTRAP_CHECKS": "bootstrap-checks.html", + "BOOTSTRAP_CHECK_ENCRYPT_SENSITIVE_DATA": "bootstrap-checks-xpack.html#_encrypt_sensitive_data_check", + "BOOTSTRAP_CHECK_PKI_REALM": "bootstrap-checks-xpack.html#_pki_realm_check", + "BOOTSTRAP_CHECK_ROLE_MAPPINGS": "bootstrap-checks-xpack.html#_role_mappings_check", + "BOOTSTRAP_CHECK_TLS": "bootstrap-checks-xpack.html#bootstrap-checks-tls", + "BOOTSTRAP_CHECK_TOKEN_SSL": "bootstrap-checks-xpack.html#_token_ssl_check", + "BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP": "security-minimal-setup.html" } diff --git a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java index 1279ea810f0a6..28b4c8a63df4d 100644 --- a/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java +++ b/server/src/test/java/org/elasticsearch/action/ActionModuleTests.java @@ -35,10 +35,10 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesInfoAction; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.usage.UsageService; import org.hamcrest.Matchers; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index 
70df808ae1f08..f68e83e13496c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.gateway.TestGatewayAllocator; @@ -88,7 +89,7 @@ public ShardAllocationDecision decideShardAllocation(ShardRouting shard, Routing assertFalse(cae.getShardAllocationDecision().getAllocateDecision().isDecisionTaken()); assertFalse(cae.getShardAllocationDecision().getMoveDecision().isDecisionTaken()); XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); String explanation; if (shardRoutingState == ShardRoutingState.RELOCATING) { explanation = "the shard is in the process of relocating from node [] to node [], wait until " + "relocation has completed"; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java index 4243a943c6761..6ade8fc184ed9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanationTests.java @@ -23,9 +23,11 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -81,8 +83,10 @@ public void testExplanationSerialization() throws Exception { public void testExplanationToXContent() throws Exception { ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true, true); + AbstractChunkedSerializingTestCase.assertChunkCount(cae, ignored -> 3); + XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); assertEquals(XContentHelper.stripWhitespace(Strings.format(""" { "index": "idx", @@ -105,8 +109,9 @@ public void testExplanationToXContent() throws Exception { public void testRandomShardExplanationToXContent() throws Exception { ClusterAllocationExplanation cae = randomClusterAllocationExplanation(true, false); + AbstractChunkedSerializingTestCase.assertChunkCount(cae, ignored -> 3); XContentBuilder builder = XContentFactory.jsonBuilder(); - cae.toXContent(builder, ToXContent.EMPTY_PARAMS); + ChunkedToXContent.wrapAsToXContent(cae).toXContent(builder, ToXContent.EMPTY_PARAMS); final String actual = Strings.toString(builder); assertThat( actual, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index bb0e9977e3ac7..78bf709ef42f7 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -36,11 +36,11 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskCancellationService; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index b5ab63140e433..9c61c5d5eeedd 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -130,7 +131,12 @@ public void testToXContentWithDeprecatedClusterState() { { "node_id": "node0", "transport_version": "8000099", - "mappings_versions": {} + "mappings_versions": { + ".system-index": { + "version": 1, + "hash": 0 + } + } } ], "metadata": { @@ -323,7 +329,11 @@ private static ClusterState createClusterState() 
{ var node0 = DiscoveryNodeUtils.create("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000)); return ClusterState.builder(new ClusterName("test")) .nodes(new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build()) - .putTransportVersion(node0.getId(), TransportVersions.V_8_0_0) + .putCompatibilityVersions( + node0.getId(), + TransportVersions.V_8_0_0, + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ) .metadata( Metadata.builder() .put( diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java index d68641d04dd74..a53755bfcec7b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexActionTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.create; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.ActionFilters; @@ -17,8 +18,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataCreateIndexService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; 
import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.SystemIndices; @@ -30,8 +37,11 @@ import org.junit.Before; import org.mockito.ArgumentCaptor; +import java.net.InetAddress; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_HIDDEN; import static org.hamcrest.Matchers.equalTo; @@ -42,13 +52,36 @@ public class TransportCreateIndexActionTests extends ESTestCase { + private static final String UNMANAGED_SYSTEM_INDEX_NAME = ".my-system"; + private static final String MANAGED_SYSTEM_INDEX_NAME = ".my-managed"; + private static final String SYSTEM_ALIAS_NAME = ".my-alias"; private static final ClusterState CLUSTER_STATE = ClusterState.builder(new ClusterName("test")) .metadata(Metadata.builder().build()) + .nodes( + DiscoveryNodes.builder() + .add( + new DiscoveryNode( + "node-1", + "node-1", + new TransportAddress(InetAddress.getLoopbackAddress(), 9300), + Map.of(), + Set.of(DiscoveryNodeRole.DATA_ROLE), + VersionInformation.CURRENT + ) + ) + .build() + ) + .compatibilityVersions( + Map.of( + "node-1", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(MANAGED_SYSTEM_INDEX_NAME + "-primary", new SystemIndexDescriptor.MappingsVersion(1, 1)) + ) + ) + ) .build(); - private static final String UNMANAGED_SYSTEM_INDEX_NAME = ".my-system"; - private static final String MANAGED_SYSTEM_INDEX_NAME = ".my-managed"; - private static final String SYSTEM_ALIAS_NAME = ".my-alias"; private static final SystemIndices SYSTEM_INDICES = new SystemIndices( List.of( new SystemIndices.Feature( diff --git a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java index d859575a2cdaf..3794fb0d85c8a 100644 --- a/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/search/KnnSearchSingleNodeTests.java @@ -409,7 +409,7 @@ public void testKnnSearchAction() throws IOException { assertEquals(2, response.getHits().getHits().length); } - public void testKnnVectorsWith2048Dims() throws IOException { + public void testKnnVectorsWith4096Dims() throws IOException { int numShards = 1 + randomInt(3); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards).build(); @@ -418,7 +418,7 @@ public void testKnnVectorsWith2048Dims() throws IOException { .startObject("properties") .startObject("vector") .field("type", "dense_vector") - .field("dims", 2048) + .field("dims", 4096) .field("index", true) .field("similarity", "l2_norm") .endObject() @@ -427,18 +427,18 @@ public void testKnnVectorsWith2048Dims() throws IOException { createIndex("index", indexSettings, builder); for (int doc = 0; doc < 10; doc++) { - client().prepareIndex("index").setSource("vector", randomVector(2048)).get(); + client().prepareIndex("index").setSource("vector", randomVector(4096)).get(); } indicesAdmin().prepareRefresh("index").get(); - float[] queryVector = randomVector(2048); + float[] queryVector = randomVector(4096); KnnSearchBuilder knnSearch = new KnnSearchBuilder("vector", queryVector, 3, 50, null).boost(5.0f); SearchResponse response = client().prepareSearch("index").setKnnSearch(List.of(knnSearch)).addFetchField("*").setSize(10).get(); assertHitCount(response, 3); assertEquals(3, response.getHits().getHits().length); - assertEquals(2048, response.getHits().getAt(0).field("vector").getValues().size()); + assertEquals(4096, response.getHits().getAt(0).field("vector").getValues().size()); } private float[] randomVector() { diff --git a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java index 843a65aa877ac..09ef0b6affc23 100644 --- 
a/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/elasticsearch/bootstrap/BootstrapChecksTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -128,10 +129,27 @@ public void testEnforceLimitsWhenPublishingToNonLocalAddress() { } public void testExceptionAggregation() { - final List checks = Arrays.asList( - context -> BootstrapCheck.BootstrapCheckResult.failure("first"), - context -> BootstrapCheck.BootstrapCheckResult.failure("second") - ); + final List checks = Arrays.asList(new BootstrapCheck() { + @Override + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheck.BootstrapCheckResult.failure("first"); + } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } + }, new BootstrapCheck() { + @Override + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheck.BootstrapCheckResult.failure("second"); + } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } + }); final NodeValidationException e = expectThrows( NodeValidationException.class, @@ -146,7 +164,8 @@ public void testExceptionAggregation() { containsString("bootstrap check failure [1] of [2]:"), containsString("first"), containsString("bootstrap check failure [2] of [2]:"), - containsString("second") + containsString("second"), + containsString("For more information see [https://www.elastic.co/guide/en/elasticsearch/reference/") ) ) ); @@ -194,6 +213,7 @@ boolean isMemoryLocked() { "initial heap size [" + initialHeapSize.get() + "] " + "not 
equal to maximum heap size [" + maxHeapSize.get() + "]" ) ); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); final String memoryLockingMessage = "and prevents memory locking from locking the entire heap"; final Matcher memoryLockingMatcher; if (isMemoryLocked) { @@ -243,6 +263,7 @@ long getMaxFileDescriptorCount() { () -> BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)) ); assertThat(e.getMessage(), containsString("max file descriptors")); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); @@ -300,6 +321,10 @@ boolean isMemoryLocked() { () -> BootstrapChecks.check(bootstrapContext, true, Collections.singletonList(check)) ); assertThat(e.getMessage(), containsString("memory locking requested for elasticsearch process but memory is not locked")); + assertThat( + e.getMessage(), + containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/") + ); } else { // nothing should happen BootstrapChecks.check(bootstrapContext, true, Collections.singletonList(check)); @@ -322,6 +347,7 @@ long getMaxNumberOfThreads() { () -> BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)) ); assertThat(e.getMessage(), containsString("max number of threads")); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE)); @@ -353,6 +379,7 @@ long getRlimInfinity() { () -> BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)) ); assertThat(e.getMessage(), containsString("max size virtual memory")); + assertThat(e.getMessage(), containsString("; for more information see 
[https://www.elastic.co/guide/en/elasticsearch/reference/")); maxSizeVirtualMemory.set(rlimInfinity); @@ -383,6 +410,7 @@ long getRlimInfinity() { () -> BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)) ); assertThat(e.getMessage(), containsString("max file size")); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); maxFileSize.set(rlimInfinity); @@ -413,6 +441,7 @@ String getVmName() { + "but should be using a server VM for the best performance" ) ); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); vmName.set("Java HotSpot(TM) 32-Bit Server VM"); BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)); @@ -441,6 +470,7 @@ String getUseSerialGC() { + "] or -XX:+UseSerialGC was explicitly specified" ) ); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); useSerialGC.set("false"); BootstrapChecks.check(emptyContext, true, Collections.singletonList(check)); @@ -464,6 +494,7 @@ boolean isSystemCallFilterInstalled() { () -> BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck)) ); assertThat(e.getMessage(), containsString("system call filters failed to install; check the logs and fix your configuration")); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); isSystemCallFilterInstalled.set(true); BootstrapChecks.check(context, true, Collections.singletonList(systemCallFilterEnabledCheck)); @@ -489,13 +520,13 @@ String message(BootstrapContext context) { } }; - runMightForkTest( - check, - isSystemCallFilterInstalled, - () -> mightFork.set(false), - () -> mightFork.set(true), - e -> assertThat(e.getMessage(), containsString("error")) - ); + 
runMightForkTest(check, isSystemCallFilterInstalled, () -> mightFork.set(false), () -> mightFork.set(true), e -> { + assertThat(e.getMessage(), containsString("error")); + assertThat( + e.getMessage(), + containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/") + ); + }); } public void testOnErrorCheck() throws NodeValidationException { @@ -521,15 +552,21 @@ String onError() { isSystemCallFilterInstalled, () -> onError.set(randomBoolean() ? "" : null), () -> onError.set(command), - e -> assertThat( - e.getMessage(), - containsString( - "OnError [" - + command - + "] requires forking but is prevented by system call filters;" - + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError" - ) - ) + e -> { + assertThat( + e.getMessage(), + containsString( + "OnError [" + + command + + "] requires forking but is prevented by system call filters;" + + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError" + ) + ); + assertThat( + e.getMessage(), + containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/") + ); + } ); } @@ -556,16 +593,22 @@ String onOutOfMemoryError() { isSystemCallFilterInstalled, () -> onOutOfMemoryError.set(randomBoolean() ? 
"" : null), () -> onOutOfMemoryError.set(command), - e -> assertThat( - e.getMessage(), - containsString( - "OnOutOfMemoryError [" - + command - + "]" - + " requires forking but is prevented by system call filters;" - + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError" - ) - ) + e -> { + assertThat( + e.getMessage(), + containsString( + "OnOutOfMemoryError [" + + command + + "]" + + " requires forking but is prevented by system call filters;" + + " upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError" + ) + ); + assertThat( + e.getMessage(), + containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/") + ); + } ); } @@ -629,6 +672,7 @@ String javaVersion() { e.getMessage(), containsString("Java version [" + javaVersion.get() + "] is an early-access build, only use release builds") ); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); // if not on an early-access build, nothing should happen javaVersion.set(randomFrom("1.8.0_152", "9")); @@ -651,6 +695,7 @@ boolean isAllPermissionGranted() { () -> BootstrapChecks.check(emptyContext, true, checks) ); assertThat(e, hasToString(containsString("granting the all permission effectively disables security"))); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); // if all permissions are not granted, nothing should happen isAllPermissionGranted.set(false); @@ -668,6 +713,11 @@ public BootstrapCheckResult check(BootstrapContext context) { public boolean alwaysEnforce() { return true; } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } }; final NodeValidationException alwaysEnforced = expectThrows( @@ -678,7 +728,8 @@ public boolean alwaysEnforce() { } public void testDiscoveryConfiguredCheck() throws NodeValidationException { - final List checks = 
Collections.singletonList(new BootstrapChecks.DiscoveryConfiguredCheck()); + final BootstrapChecks.DiscoveryConfiguredCheck check = new BootstrapChecks.DiscoveryConfiguredCheck(); + final List checks = Collections.singletonList(check); final BootstrapContext zen2Context = createTestContext( Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), MULTI_NODE_DISCOVERY_TYPE).build(), @@ -713,6 +764,7 @@ public void testDiscoveryConfiguredCheck() throws NodeValidationException { ) ) ); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); CheckedConsumer ensureChecksPass = b -> { final BootstrapContext context = createTestContext( @@ -741,6 +793,7 @@ ByteOrder nativeByteOrder() { () -> BootstrapChecks.check(emptyContext, true, List.of(byteOrderCheck)) ); assertThat(e.getMessage(), containsString("Little-endian native byte order is required to run Elasticsearch")); + assertThat(e.getMessage(), containsString("; for more information see [https://www.elastic.co/guide/en/elasticsearch/reference/")); reference[0] = ByteOrder.LITTLE_ENDIAN; BootstrapChecks.check(emptyContext, true, List.of(byteOrderCheck)); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 89aacb6f03932..46c6d1db47a7c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -46,6 +46,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.SystemIndexDescriptor; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -1049,7 +1050,7 @@ private ClusterState buildClusterState() throws IOException { 
.add(DiscoveryNodeUtils.create("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111))) .build() ) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Map.of( "nodeId1", new CompatibilityVersions(TransportVersion.current(), Map.of(".tasks", new SystemIndexDescriptor.MappingsVersion(1, 1))) @@ -1159,7 +1160,7 @@ public void testGetMinTransportVersion() throws IOException { for (int i = 0; i < numNodes; i++) { TransportVersion tv = TransportVersionUtils.randomVersion(); - builder.putTransportVersion("nodeTv" + i, tv); + builder.putCompatibilityVersions("nodeTv" + i, tv, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); minVersion = Collections.min(List.of(minVersion, tv)); } @@ -1175,6 +1176,73 @@ public void testGetMinTransportVersion() throws IOException { ); } + public void testHasMixedSystemIndexVersions() throws IOException { + // equal mappings versions + { + var builder = ClusterState.builder(buildClusterState()); + builder.compatibilityVersions( + Map.of( + "node1", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ), + "node2", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ) + ) + ); + assertFalse(builder.build().hasMixedSystemIndexVersions()); + } + + // unequal mappings versions + { + var builder = ClusterState.builder(buildClusterState()); + builder.compatibilityVersions( + Map.of( + "node1", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ), + "node2", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(2, 0)) + ) + ) + ); + assertTrue(builder.build().hasMixedSystemIndexVersions()); + } + + // one node has a mappings version that the other is missing + { + var builder = 
ClusterState.builder(buildClusterState()); + builder.compatibilityVersions( + Map.of( + "node1", + new CompatibilityVersions( + TransportVersion.current(), + Map.of( + ".system-index", + new SystemIndexDescriptor.MappingsVersion(1, 0), + ".another-system-index", + new SystemIndexDescriptor.MappingsVersion(1, 0) + ) + ), + "node2", + new CompatibilityVersions( + TransportVersion.current(), + Map.of(".system-index", new SystemIndexDescriptor.MappingsVersion(1, 0)) + ) + ) + ); + assertTrue(builder.build().hasMixedSystemIndexVersions()); + } + } + public static int expectedChunkCount(ToXContent.Params params, ClusterState clusterState) { final var metrics = ClusterState.Metric.parseString(params.param("metric", "_all"), true); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index db996ec397716..4877ece7712bd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -25,13 +25,13 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.monitor.StatusInfo; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportException; diff --git 
a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java index 8a3de96f02f91..04587018fc9ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataDeleteIndexServiceTests.java @@ -73,7 +73,10 @@ public void setUp() throws Exception { public void testDeleteMissing() { Index index = new Index("missing", "doesn't matter"); ClusterState state = ClusterState.builder(ClusterName.DEFAULT).build(); - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> service.deleteIndices(state, Set.of(index))); + IndexNotFoundException e = expectThrows( + IndexNotFoundException.class, + () -> MetadataDeleteIndexService.deleteIndices(state, Set.of(index), Settings.EMPTY) + ); assertEquals(index, e.getIndex()); } @@ -100,7 +103,11 @@ public void testDeleteSnapshotting() { ClusterState state = ClusterState.builder(clusterState(index)).putCustom(SnapshotsInProgress.TYPE, snaps).build(); Exception e = expectThrows( SnapshotInProgressException.class, - () -> service.deleteIndices(state, Set.of(state.metadata().getIndices().get(index).getIndex())) + () -> MetadataDeleteIndexService.deleteIndices( + state, + Set.of(state.metadata().getIndices().get(index).getIndex()), + Settings.EMPTY + ) ); assertEquals( "Cannot delete indices that are being snapshotted: [[" @@ -155,7 +162,11 @@ public void testDeleteIndexWithAnAlias() { .blocks(ClusterBlocks.builder().addBlocks(idxMetadata)) .build(); - ClusterState after = service.deleteIndices(before, Set.of(before.metadata().getIndices().get(index).getIndex())); + ClusterState after = MetadataDeleteIndexService.deleteIndices( + before, + Set.of(before.metadata().getIndices().get(index).getIndex()), + Settings.EMPTY + ); assertNull(after.metadata().getIndices().get(index)); 
assertNull(after.routingTable().index(index)); @@ -175,7 +186,7 @@ public void testDeleteBackingIndexForDataStream() { int numIndexToDelete = randomIntBetween(1, numBackingIndices - 1); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, numIndexToDelete)).getIndex(); - ClusterState after = service.deleteIndices(before, Set.of(indexToDelete)); + ClusterState after = MetadataDeleteIndexService.deleteIndices(before, Set.of(indexToDelete), Settings.EMPTY); assertThat(after.metadata().getIndices().get(indexToDelete.getName()), nullValue()); assertThat(after.metadata().getIndices().size(), equalTo(numBackingIndices - 1)); @@ -200,7 +211,7 @@ public void testDeleteMultipleBackingIndexForDataStream() { for (int k : indexNumbersToDelete) { indicesToDelete.add(before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, k)).getIndex()); } - ClusterState after = service.deleteIndices(before, indicesToDelete); + ClusterState after = MetadataDeleteIndexService.deleteIndices(before, indicesToDelete, Settings.EMPTY); DataStream dataStream = after.metadata().dataStreams().get(dataStreamName); assertThat(dataStream, notNullValue()); @@ -221,7 +232,10 @@ public void testDeleteCurrentWriteIndexForDataStream() { ); Index indexToDelete = before.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, numBackingIndices)).getIndex(); - Exception e = expectThrows(IllegalArgumentException.class, () -> service.deleteIndices(before, Set.of(indexToDelete))); + Exception e = expectThrows( + IllegalArgumentException.class, + () -> MetadataDeleteIndexService.deleteIndices(before, Set.of(indexToDelete), Settings.EMPTY) + ); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java index 1fbd578cc1bed..0901b1190cfc0 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexAliasesServiceTests.java @@ -17,19 +17,25 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Tuple; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.concurrent.TimeUnit; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance; @@ -42,33 +48,32 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class MetadataIndexAliasesServiceTests extends ESTestCase { - private final MetadataDeleteIndexService deleteIndexService = mock(MetadataDeleteIndexService.class); - private final MetadataIndexAliasesService service = new MetadataIndexAliasesService( - mock(ClusterService.class), - null, - deleteIndexService, - xContentRegistry() - ); - - public MetadataIndexAliasesServiceTests() { - // Mock any deletes so we don't need to worry 
about how MetadataDeleteIndexService does its job - when(deleteIndexService.deleteIndices(any(ClusterState.class), anySet())).then(i -> { - ClusterState state = (ClusterState) i.getArguments()[0]; - @SuppressWarnings("unchecked") - Collection indices = (Collection) i.getArguments()[1]; - Metadata.Builder meta = Metadata.builder(state.metadata()); - for (Index index : indices) { - assertTrue("index now found", state.metadata().hasIndexAbstraction(index.getName())); - meta.remove(index.getName()); // We only think about metadata for this test. Not routing or any other fun stuff. - } - return ClusterState.builder(state).metadata(meta).build(); - }); + private static TestThreadPool threadPool; + private ClusterService clusterService; + private MetadataIndexAliasesService service; + + @BeforeClass + public static void setupThreadPool() { + threadPool = new TestThreadPool(getTestClass().getName()); + } + + @Before + public void setupServices() { + clusterService = ClusterServiceUtils.createClusterService(threadPool); + service = new MetadataIndexAliasesService(clusterService, null, xContentRegistry()); + } + + @After + public void closeClusterService() throws Exception { + clusterService.close(); + } + + @AfterClass + public static void tearDownThreadPool() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; } public void testAddAndRemove() { diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index 3c8540c7771c6..323c50bf23c3b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -113,7 +113,7 @@ public void testNothingFixedWhenNothingToInfer() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) 
.nodes(node(Version.V_8_8_0)) - .compatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) + .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0, Map.of()))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); @@ -128,7 +128,7 @@ public void testNothingFixedWhenOnNextVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION)) - .compatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) + .nodeIdsToCompatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION, Map.of()))) .build(); TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); @@ -143,7 +143,7 @@ public void testNothingFixedWhenOnPreviousVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_7_0, Version.V_8_8_0)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(TransportVersions.V_8_7_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -164,7 +164,7 @@ public void testVersionsAreFixed() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -193,7 +193,7 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( 
versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -208,7 +208,7 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) @@ -229,7 +229,7 @@ public void testFailedRequestsAreRetried() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .compatibilityVersions( + .nodeIdsToCompatibilityVersions( Maps.transformValues( versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), transportVersion -> new CompatibilityVersions(transportVersion, Map.of()) diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index d2a7036b7db6f..b4227e2e523b7 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -23,10 +23,10 @@ import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.plugins.NetworkPlugin; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportRequest; diff 
--git a/server/src/test/java/org/elasticsearch/common/settings/RotatableSecretTests.java b/server/src/test/java/org/elasticsearch/common/settings/RotatableSecretTests.java new file mode 100644 index 0000000000000..cf4d1c3d4f204 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/settings/RotatableSecretTests.java @@ -0,0 +1,249 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.mockito.stubbing.Answer; + +import java.time.Instant; +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RotatableSecretTests extends ESTestCase { + + private final SecureString secret1 = new SecureString(randomAlphaOfLength(10)); + private final SecureString secret2 = new SecureString(randomAlphaOfLength(10)); + private final SecureString secret3 = new SecureString(randomAlphaOfLength(10)); + + public void testBasicRotation() throws Exception { + // initial state + RotatableSecret rotatableSecret = new RotatableSecret(secret1); + assertTrue(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(secret2)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertTrue(rotatableSecret.isSet()); + assertEquals(secret1, rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + assertEquals(Instant.EPOCH, rotatableSecret.getSecrets().priorValidTill()); + 
+ // normal rotation + TimeValue expiresIn = TimeValue.timeValueDays(1); + rotatableSecret.rotate(secret2, expiresIn); + assertTrue(rotatableSecret.matches(secret1)); + assertTrue(rotatableSecret.matches(secret2)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertTrue(rotatableSecret.isSet()); + assertEquals(secret2, rotatableSecret.getSecrets().current()); + assertEquals(secret1, rotatableSecret.getSecrets().prior()); + assertTrue(rotatableSecret.getSecrets().priorValidTill().isAfter(Instant.now())); + assertTrue( + rotatableSecret.getSecrets().priorValidTill().isBefore(Instant.now().plusMillis(TimeValue.timeValueDays(2).getMillis())) + ); + + // attempt to rotate same value does nothing + rotatableSecret.rotate(secret2, TimeValue.timeValueDays(99)); // ignores the new expiry since you can't rotate the same secret + assertTrue(rotatableSecret.matches(secret1)); + assertTrue(rotatableSecret.matches(secret2)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertTrue(rotatableSecret.isSet()); + assertEquals(secret2, rotatableSecret.getSecrets().current()); + assertEquals(secret1, rotatableSecret.getSecrets().prior()); + assertTrue(rotatableSecret.getSecrets().priorValidTill().isAfter(Instant.now())); + assertTrue( + rotatableSecret.getSecrets().priorValidTill().isBefore(Instant.now().plusMillis(TimeValue.timeValueDays(2).getMillis())) + ); + + // rotate with expiry + rotatableSecret.rotate(secret3, TimeValue.timeValueMillis(1)); + Thread.sleep(2); // ensure secret2 is expired + assertTrue(rotatableSecret.matches(secret3)); + assertFalse(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(secret2)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertTrue(rotatableSecret.isSet()); + assertEquals(secret3, rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + 
assertTrue(rotatableSecret.getSecrets().priorValidTill().isBefore(Instant.now())); + + // unset current and prior + rotatableSecret.rotate(null, TimeValue.ZERO); + assertFalse(rotatableSecret.matches(secret3)); + assertFalse(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(secret2)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertFalse(rotatableSecret.isSet()); + assertNull(rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + assertTrue(rotatableSecret.getSecrets().priorValidTill().isBefore(Instant.now())); + } + + public void testConcurrentReadWhileLocked() throws Exception { + // initial state + RotatableSecret rotatableSecret = new RotatableSecret(secret1); + assertTrue(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(secret2)); + assertEquals(secret1, rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + + boolean expired = randomBoolean(); + CountDownLatch latch = new CountDownLatch(1); + TimeValue mockGracePeriod = mock(TimeValue.class); // use a mock to force a long rotation to exercise the concurrency + when(mockGracePeriod.getMillis()).then((Answer) invocation -> { + latch.await(); + return expired ? 
0L : Long.MAX_VALUE; + }); + + // start writer thread + Thread t1 = new Thread(() -> rotatableSecret.rotate(secret2, mockGracePeriod)); + t1.start(); + assertBusy(() -> assertEquals(Thread.State.WAITING, t1.getState())); // waiting on countdown latch, holds write lock + assertTrue(rotatableSecret.isWriteLocked()); + + // start reader threads + int readers = randomIntBetween(1, 16); + Set readerThreads = new HashSet<>(readers); + for (int i = 0; i < readers; i++) { + Thread t = new Thread(() -> { + if (randomBoolean()) { // either matches or isSet can block + if (expired) { + assertFalse(rotatableSecret.matches(secret1)); + } else { + assertTrue(rotatableSecret.matches(secret1)); + } + assertTrue(rotatableSecret.matches(secret2)); + } else { + assertTrue(rotatableSecret.isSet()); + } + }); + readerThreads.add(t); + t.start(); + } + for (Thread t : readerThreads) { + assertBusy(() -> assertEquals(Thread.State.WAITING, t.getState())); // waiting on write lock from thread 1 to be released + } + assertTrue(rotatableSecret.isWriteLocked()); + latch.countDown(); // let thread1 finish, which also unblocks the reader threads + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t1.getState())); // done with work + for (Thread t : readerThreads) { + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t.getState())); // done with work + t.join(); + } + t1.join(); + assertFalse(rotatableSecret.isWriteLocked()); + } + + public void testConcurrentRotations() throws Exception { + // initial state + RotatableSecret rotatableSecret = new RotatableSecret(secret1); + assertTrue(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(secret2)); + assertEquals(secret1, rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + + // start first rotation + AtomicBoolean latch1 = new AtomicBoolean(false); // using boolean as latch to differentiate the kinds of waiting + TimeValue mockGracePeriod1 = mock(TimeValue.class); // use a 
mock to force a long rotation to exercise the concurrency + when(mockGracePeriod1.getMillis()).then((Answer) invocation -> { + while (latch1.get() == false) { + Thread.sleep(10); // thread in TIMED_WAITING + } + return Long.MAX_VALUE; + }); + Thread t1 = new Thread(() -> rotatableSecret.rotate(secret2, mockGracePeriod1)); + t1.start(); + assertBusy(() -> assertEquals(Thread.State.TIMED_WAITING, t1.getState())); // waiting on latch, holds write lock + + // start second rotation + AtomicBoolean latch2 = new AtomicBoolean(false); + TimeValue mockGracePeriod2 = mock(TimeValue.class); // use a mock to force a long rotation to exercise the concurrency + when(mockGracePeriod2.getMillis()).then((Answer) invocation -> { + while (latch2.get() == false) { + Thread.sleep(10); // thread in TIMED_WAITING + } + return Long.MAX_VALUE; + }); + Thread t2 = new Thread(() -> rotatableSecret.rotate(secret3, mockGracePeriod2)); + t2.start(); + assertBusy(() -> assertEquals(Thread.State.WAITING, t2.getState())); // waiting on write lock from thread 1 + + // start third rotation + AtomicBoolean latch3 = new AtomicBoolean(false); + TimeValue mockGracePeriod3 = mock(TimeValue.class); // use a mock to force a long rotation to exercise the concurrency + when(mockGracePeriod3.getMillis()).then((Answer) invocation -> { + while (latch3.get() == false) { + Thread.sleep(10); // thread in TIMED_WAITING + } + return Long.MAX_VALUE; + }); + Thread t3 = new Thread(() -> rotatableSecret.rotate(null, mockGracePeriod3)); + t3.start(); + assertBusy(() -> assertEquals(Thread.State.WAITING, t3.getState())); // waiting on write lock from thread 1 + + // initial state + assertEquals(rotatableSecret.getSecrets().current(), secret1); + assertNull(rotatableSecret.getSecrets().prior()); + assertBusy(() -> assertEquals(Thread.State.TIMED_WAITING, t1.getState())); // waiting on latch + assertBusy(() -> assertEquals(Thread.State.WAITING, t2.getState())); // waiting on lock + assertBusy(() -> 
assertEquals(Thread.State.WAITING, t3.getState())); // waiting on lock + + latch1.set(true); // let first rotation succeed + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t1.getState())); // work done + assertBusy(() -> assertEquals(Thread.State.TIMED_WAITING, t2.getState())); // waiting on latch + assertBusy(() -> assertEquals(Thread.State.WAITING, t3.getState())); // waiting lock + assertEquals(rotatableSecret.getSecrets().current(), secret2); + assertEquals(rotatableSecret.getSecrets().prior(), secret1); + + latch2.set(true); // let second rotation succeed + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t1.getState())); // work done + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t2.getState())); // work done + assertBusy(() -> assertEquals(Thread.State.TIMED_WAITING, t3.getState())); // waiting on latch + assertEquals(rotatableSecret.getSecrets().current(), secret3); + assertEquals(rotatableSecret.getSecrets().prior(), secret2); + + latch3.set(true); // let third rotation succeed + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t1.getState())); // work done + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t2.getState())); // work done + assertBusy(() -> assertEquals(Thread.State.TERMINATED, t3.getState())); // work done + assertEquals(rotatableSecret.getSecrets().current(), null); + assertEquals(rotatableSecret.getSecrets().prior(), secret3); + + t1.join(); + t2.join(); + t3.join(); + } + + public void testUnsetThenRotate() { + // it is not set on startup + RotatableSecret rotatableSecret = new RotatableSecret(null); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertFalse(rotatableSecret.isSet()); + assertNull(rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + assertEquals(Instant.EPOCH, rotatableSecret.getSecrets().priorValidTill()); + + // normal rotation for when it was not set on startup + TimeValue expiresIn = 
TimeValue.timeValueDays(1); + rotatableSecret.rotate(secret1, expiresIn); + assertTrue(rotatableSecret.matches(secret1)); + assertFalse(rotatableSecret.matches(new SecureString(randomAlphaOfLength(10)))); + assertTrue(rotatableSecret.isSet()); + assertEquals(secret1, rotatableSecret.getSecrets().current()); + assertNull(rotatableSecret.getSecrets().prior()); + assertTrue(rotatableSecret.getSecrets().priorValidTill().isAfter(Instant.now())); + assertTrue( + rotatableSecret.getSecrets().priorValidTill().isBefore(Instant.now().plusMillis(TimeValue.timeValueDays(2).getMillis())) + ); + } +} diff --git a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java index e844cae4d2bb8..c61187066487d 100644 --- a/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/PeerFinderTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -29,7 +30,6 @@ import org.elasticsearch.test.transport.CapturingTransport.CapturedRequest; import org.elasticsearch.test.transport.StubbableConnectionManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectionManager; import org.elasticsearch.transport.TransportException; diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index b8d102db7e8ae..86c48c1e183ea 100644 --- 
a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -39,13 +39,13 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.transport.TransportSettings; import org.elasticsearch.usage.UsageService; diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index c39302e710d7e..6e0f58d0cdb97 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -36,13 +36,13 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; diff --git 
a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java index 530a901d1cda6..02259a24a5e94 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TextFieldMapperTests.java @@ -57,12 +57,18 @@ import org.elasticsearch.index.analysis.StandardTokenizerFactory; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.LeafFieldData; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.index.query.MatchPhrasePrefixQueryBuilder; import org.elasticsearch.index.query.MatchPhraseQueryBuilder; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.search.MatchQueryParser; import org.elasticsearch.index.search.QueryStringQueryParser; +import org.elasticsearch.script.field.TextDocValuesField; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.lookup.SourceProvider; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -75,6 +81,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -1280,4 +1287,34 @@ public void testDocValuesLoadedFromSubStoredKeywordSynthetic() throws IOExceptio assertScriptDocValues(mapper, input, equalTo(List.of(input))); } } + + public void testEmpty() throws Exception { + MapperService mapperService = createMapperService(fieldMapping(b -> b.field("type", "text"))); + var d0 = source(b -> 
b.field("field", new String[0])); + var d1 = source(b -> b.field("field", "")); + var d2 = source(b -> b.field("field", "hello")); + var d3 = source(b -> b.nullField("field")); + withLuceneIndex(mapperService, iw -> { + for (SourceToParse src : List.of(d0, d1, d2, d3)) { + iw.addDocument(mapperService.documentMapper().parse(src).rootDoc()); + } + }, reader -> { + IndexSearcher searcher = newSearcher(reader); + MappedFieldType ft = mapperService.fieldType("field"); + SourceProvider sourceProvider = mapperService.mappingLookup().isSourceSynthetic() ? (ctx, doc) -> { + throw new IllegalArgumentException("Can't load source in scripts in synthetic mode"); + } : SourceProvider.fromStoredFields(); + SearchLookup searchLookup = new SearchLookup(null, null, sourceProvider); + IndexFieldData sfd = ft.fielddataBuilder( + new FieldDataContext("", () -> searchLookup, Set::of, MappedFieldType.FielddataOperation.SCRIPT) + ).build(null, null); + LeafFieldData lfd = sfd.load(getOnlyLeafReader(searcher.getIndexReader()).getContext()); + TextDocValuesField scriptDV = (TextDocValuesField) lfd.getScriptFieldFactory("field"); + SortedBinaryDocValues dv = scriptDV.getInput(); + assertFalse(dv.advanceExact(0)); + assertTrue(dv.advanceExact(1)); + assertTrue(dv.advanceExact(2)); + assertFalse(dv.advanceExact(3)); + }); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index c2762d859f266..5b911ee1348db 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -197,7 +197,7 @@ public void testDims() { assertThat( e.getMessage(), equalTo( - "Failed to parse mapping: " + "The number of dimensions for field [field] should be in the range [1, 2048] but was [0]" + "Failed to parse mapping: " + 
"The number of dimensions for field [field] should be in the range [1, 4096] but was [0]" ) ); } @@ -205,13 +205,13 @@ public void testDims() { { Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); - b.field("dims", 3000); + b.field("dims", 5000); }))); assertThat( e.getMessage(), equalTo( "Failed to parse mapping: " - + "The number of dimensions for field [field] should be in the range [1, 2048] but was [3000]" + + "The number of dimensions for field [field] should be in the range [1, 4096] but was [5000]" ) ); } @@ -220,13 +220,13 @@ public void testDims() { Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { b.field("type", "dense_vector"); b.field("index", "true"); - b.field("dims", 3000); + b.field("dims", 5000); }))); assertThat( e.getMessage(), equalTo( "Failed to parse mapping: " - + "The number of dimensions for field [field] should be in the range [1, 2048] but was [3000]" + + "The number of dimensions for field [field] should be in the range [1, 4096] but was [5000]" ) ); } @@ -597,10 +597,10 @@ public void testDocumentsWithIncorrectDims() throws Exception { /** * Test that max dimensions limit for float dense_vector field - * is 2048 as defined by {@link DenseVectorFieldMapper#MAX_DIMS_COUNT} + * is 4096 as defined by {@link DenseVectorFieldMapper#MAX_DIMS_COUNT} */ public void testMaxDimsFloatVector() throws IOException { - final int dims = 2048; + final int dims = 4096; VectorSimilarity similarity = VectorSimilarity.COSINE; DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "dense_vector").field("dims", dims).field("index", true).field("similarity", similarity)) @@ -624,10 +624,10 @@ public void testMaxDimsFloatVector() throws IOException { /** * Test that max dimensions limit for byte dense_vector field - * is 2048 as defined by {@link KnnByteVectorField} + * is 4096 as defined by {@link 
KnnByteVectorField} */ public void testMaxDimsByteVector() throws IOException { - final int dims = 2048; + final int dims = 4096; VectorSimilarity similarity = VectorSimilarity.COSINE; ; DocumentMapper mapper = createDocumentMapper( @@ -703,7 +703,7 @@ protected void assertFetch(MapperService mapperService, String field, Object val @Override // TODO: add `byte` element_type tests protected void randomFetchTestFieldConfig(XContentBuilder b) throws IOException { - b.field("type", "dense_vector").field("dims", randomIntBetween(2, 2048)).field("element_type", "float"); + b.field("type", "dense_vector").field("dims", randomIntBetween(2, 4096)).field("element_type", "float"); if (randomBoolean()) { b.field("index", true).field("similarity", randomFrom(VectorSimilarity.values()).toString()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index d22056d49beb5..1f9013502144e 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -158,39 +158,39 @@ public void testFloatCreateKnnQuery() { } public void testCreateKnnQueryMaxDims() { - { // float type with 2048 dims - DenseVectorFieldType fieldWith2048dims = new DenseVectorFieldType( + { // float type with 4096 dims + DenseVectorFieldType fieldWith4096dims = new DenseVectorFieldType( "f", IndexVersion.current(), DenseVectorFieldMapper.ElementType.FLOAT, - 2048, + 4096, true, VectorSimilarity.COSINE, Collections.emptyMap() ); - float[] queryVector = new float[2048]; - for (int i = 0; i < 2048; i++) { + float[] queryVector = new float[4096]; + for (int i = 0; i < 4096; i++) { queryVector[i] = randomFloat(); } - Query query = fieldWith2048dims.createKnnQuery(queryVector, 10, null, null); + Query query = 
fieldWith4096dims.createKnnQuery(queryVector, 10, null, null); assertThat(query, instanceOf(KnnFloatVectorQuery.class)); } - { // byte type with 2048 dims - DenseVectorFieldType fieldWith2048dims = new DenseVectorFieldType( + { // byte type with 4096 dims + DenseVectorFieldType fieldWith4096dims = new DenseVectorFieldType( "f", IndexVersion.current(), DenseVectorFieldMapper.ElementType.BYTE, - 2048, + 4096, true, VectorSimilarity.COSINE, Collections.emptyMap() ); - byte[] queryVector = new byte[2048]; - for (int i = 0; i < 2048; i++) { + byte[] queryVector = new byte[4096]; + for (int i = 0; i < 4096; i++) { queryVector[i] = randomByte(); } - Query query = fieldWith2048dims.createKnnQuery(queryVector, 10, null, null); + Query query = fieldWith4096dims.createKnnQuery(queryVector, 10, null, null); assertThat(query, instanceOf(KnnByteVectorQuery.class)); } } diff --git a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java index 66b130ca69bc8..64c70f93f8a3f 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java @@ -116,7 +116,7 @@ public void run() { IndexShard replica = shards.addReplica(); Future future = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, null, null, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, 0L, null, null, recoveryListener) { @Override public void cleanFiles( int totalTranslogOps, @@ -199,7 +199,7 @@ public IndexResult index(Index op) throws IOException { IndexShard replica = shards.addReplica(); Future fut = shards.asyncRecoverReplica( replica, - (shard, node) -> new RecoveryTarget(shard, node, null, null, recoveryListener) { + (shard, node) -> new RecoveryTarget(shard, node, 0L, null, null, 
recoveryListener) { @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { try { diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index b7a4c5219b68e..6773b06729a8e 100644 --- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -452,7 +452,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { AtomicBoolean recoveryDone = new AtomicBoolean(false); final Future recoveryFuture = shards.asyncRecoverReplica(newReplica, (indexShard, node) -> { recoveryStart.countDown(); - return new RecoveryTarget(indexShard, node, null, null, recoveryListener) { + return new RecoveryTarget(indexShard, node, 0L, null, null, recoveryListener) { @Override public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener listener) { recoveryDone.set(true); @@ -506,7 +506,7 @@ protected EngineFactory getEngineFactory(final ShardRouting routing) { final IndexShard replica = shards.addReplica(); final Future recoveryFuture = shards.asyncRecoverReplica( replica, - (indexShard, node) -> new RecoveryTarget(indexShard, node, null, null, recoveryListener) { + (indexShard, node) -> new RecoveryTarget(indexShard, node, 0L, null, null, recoveryListener) { @Override public void indexTranslogOperations( final List operations, @@ -784,7 +784,7 @@ public BlockingTarget( PeerRecoveryTargetService.RecoveryListener listener, Logger logger ) { - super(shard, sourceNode, null, null, listener); + super(shard, sourceNode, 0L, null, null, listener); this.recoveryBlocked = recoveryBlocked; this.releaseRecovery = releaseRecovery; this.stageToBlock = stageToBlock; diff --git 
a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index a3061df8839fb..5fdfb92d3a193 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -2856,31 +2856,37 @@ public void testTranslogRecoverySyncsTranslog() throws IOException { indexDoc(primary, "_doc", "0", "{\"foo\" : \"bar\"}"); IndexShard replica = newShard(primary.shardId(), false, "n2", metadata, null); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, null, null, recoveryListener) { - @Override - public void indexTranslogOperations( - final List operations, - final int totalTranslogOps, - final long maxSeenAutoIdTimestamp, - final long maxSeqNoOfUpdatesOrDeletes, - final RetentionLeases retentionLeases, - final long mappingVersion, - final ActionListener listener - ) { - super.indexTranslogOperations( - operations, - totalTranslogOps, - maxSeenAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, - retentionLeases, - mappingVersion, - listener.delegateFailureAndWrap((l, r) -> { - assertFalse(replica.isSyncNeeded()); - l.onResponse(r); - }) - ); - } - }, true, true); + recoverReplica( + replica, + primary, + (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, 0L, null, null, recoveryListener) { + @Override + public void indexTranslogOperations( + final List operations, + final int totalTranslogOps, + final long maxSeenAutoIdTimestamp, + final long maxSeqNoOfUpdatesOrDeletes, + final RetentionLeases retentionLeases, + final long mappingVersion, + final ActionListener listener + ) { + super.indexTranslogOperations( + operations, + totalTranslogOps, + maxSeenAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, + retentionLeases, + mappingVersion, + listener.delegateFailureAndWrap((l, r) -> { + assertFalse(replica.isSyncNeeded()); + l.onResponse(r); + 
}) + ); + } + }, + true, + true + ); closeShards(primary, replica); } @@ -2980,32 +2986,38 @@ public void testShardActiveDuringPeerRecovery() throws IOException { replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); // Shard is still inactive since we haven't started recovering yet assertFalse(replica.isActive()); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, null, null, recoveryListener) { - @Override - public void indexTranslogOperations( - final List operations, - final int totalTranslogOps, - final long maxAutoIdTimestamp, - final long maxSeqNoOfUpdatesOrDeletes, - final RetentionLeases retentionLeases, - final long mappingVersion, - final ActionListener listener - ) { - super.indexTranslogOperations( - operations, - totalTranslogOps, - maxAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, - retentionLeases, - mappingVersion, - listener.delegateFailureAndWrap((l, checkpoint) -> { - l.onResponse(checkpoint); - // Shard should now be active since we did recover: - assertTrue(replica.isActive()); - }) - ); - } - }, false, true); + recoverReplica( + replica, + primary, + (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, 0L, null, null, recoveryListener) { + @Override + public void indexTranslogOperations( + final List operations, + final int totalTranslogOps, + final long maxAutoIdTimestamp, + final long maxSeqNoOfUpdatesOrDeletes, + final RetentionLeases retentionLeases, + final long mappingVersion, + final ActionListener listener + ) { + super.indexTranslogOperations( + operations, + totalTranslogOps, + maxAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, + retentionLeases, + mappingVersion, + listener.delegateFailureAndWrap((l, checkpoint) -> { + l.onResponse(checkpoint); + // Shard should now be active since we did recover: + assertTrue(replica.isActive()); + }) + ); + } + }, + false, + true + ); closeShards(primary, replica); } @@ -3033,48 
+3045,54 @@ public void testRefreshListenersDuringPeerRecovery() throws IOException { DiscoveryNode localNode = DiscoveryNodeUtils.builder("foo").roles(emptySet()).build(); replica.markAsRecovering("for testing", new RecoveryState(replica.routingEntry(), localNode, localNode)); assertListenerCalled.accept(replica); - recoverReplica(replica, primary, (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, null, null, recoveryListener) { - // we're only checking that listeners are called when the engine is open, before there is no point - @Override - public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { - super.prepareForTranslogOperations(totalTranslogOps, listener.delegateFailureAndWrap((l, r) -> { - assertListenerCalled.accept(replica); - l.onResponse(r); - })); - } - - @Override - public void indexTranslogOperations( - final List operations, - final int totalTranslogOps, - final long maxAutoIdTimestamp, - final long maxSeqNoOfUpdatesOrDeletes, - final RetentionLeases retentionLeases, - final long mappingVersion, - final ActionListener listener - ) { - super.indexTranslogOperations( - operations, - totalTranslogOps, - maxAutoIdTimestamp, - maxSeqNoOfUpdatesOrDeletes, - retentionLeases, - mappingVersion, - listener.delegateFailureAndWrap((l, r) -> { + recoverReplica( + replica, + primary, + (shard, discoveryNode) -> new RecoveryTarget(shard, discoveryNode, 0L, null, null, recoveryListener) { + // we're only checking that listeners are called when the engine is open, before there is no point + @Override + public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { + super.prepareForTranslogOperations(totalTranslogOps, listener.delegateFailureAndWrap((l, r) -> { assertListenerCalled.accept(replica); l.onResponse(r); - }) - ); - } + })); + } - @Override - public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener listener) { - 
super.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, listener.delegateFailureAndWrap((l, r) -> { - assertListenerCalled.accept(replica); - l.onResponse(r); - })); - } - }, false, true); + @Override + public void indexTranslogOperations( + final List operations, + final int totalTranslogOps, + final long maxAutoIdTimestamp, + final long maxSeqNoOfUpdatesOrDeletes, + final RetentionLeases retentionLeases, + final long mappingVersion, + final ActionListener listener + ) { + super.indexTranslogOperations( + operations, + totalTranslogOps, + maxAutoIdTimestamp, + maxSeqNoOfUpdatesOrDeletes, + retentionLeases, + mappingVersion, + listener.delegateFailureAndWrap((l, r) -> { + assertListenerCalled.accept(replica); + l.onResponse(r); + }) + ); + } + + @Override + public void finalizeRecovery(long globalCheckpoint, long trimAboveSeqNo, ActionListener listener) { + super.finalizeRecovery(globalCheckpoint, trimAboveSeqNo, listener.delegateFailureAndWrap((l, r) -> { + assertListenerCalled.accept(replica); + l.onResponse(r); + })); + } + }, + false, + true + ); closeShards(primary, replica); } @@ -4740,7 +4758,7 @@ public void onRecoveryFailure(RecoveryFailedException e, boolean sendShardFailur assert false : "Unexpected failure"; } }; - recoverReplica(replicaShard, primary, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, null, null, recoveryListener) { + recoverReplica(replicaShard, primary, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, 0L, null, null, recoveryListener) { @Override public void indexTranslogOperations( List operations, diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java index 1a372de6129a3..92d51b80326ae 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexDescriptorTests.java @@ -11,6 +11,7 @@ import 
org.apache.lucene.util.automaton.CharacterRunAutomaton; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.indices.SystemIndexDescriptor.Type; @@ -19,9 +20,9 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.util.List; -import java.util.Locale; import java.util.Map; +import static org.elasticsearch.indices.SystemIndexDescriptor.VERSION_META_KEY; import static org.elasticsearch.indices.SystemIndexDescriptor.findDynamicMapping; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -29,16 +30,26 @@ public class SystemIndexDescriptorTests extends ESTestCase { - private static final String MAPPINGS = String.format(Locale.ROOT, """ + private static final int TEST_MAPPINGS_VERSION = 10; + private static final int TEST_MAPPINGS_PRIOR_VERSION = 5; + private static final int TEST_MAPPINGS_NONEXISTENT_VERSION = 2; + + private static final String MAPPINGS_FORMAT_STRING = """ { "_doc": { "_meta": { "version": "7.4.0", - "%s": 1 + "%s": %d } } } - """, SystemIndexDescriptor.VERSION_META_KEY); + """; + + private static final String MAPPINGS = Strings.format( + MAPPINGS_FORMAT_STRING, + SystemIndexDescriptor.VERSION_META_KEY, + TEST_MAPPINGS_VERSION + ); /** * Tests the various validation rules that are applied when creating a new system index descriptor. 
@@ -261,7 +272,7 @@ public void testGetDescriptorCompatibleWith() { .setAliasName(".system") .setType(Type.INTERNAL_MANAGED) .setSettings(Settings.EMPTY) - .setMappings(MAPPINGS) + .setMappings(Strings.format(MAPPINGS_FORMAT_STRING, VERSION_META_KEY, TEST_MAPPINGS_PRIOR_VERSION)) .setVersionMetaKey("version") .setOrigin("system") .setMinimumNodeVersion(Version.V_7_0_0) @@ -282,7 +293,11 @@ public void testGetDescriptorCompatibleWith() { SystemIndexDescriptor compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT); assertSame(descriptor, compat); + compat = descriptor.getDescriptorCompatibleWith(descriptor.getMappingsVersion()); + assertSame(descriptor, compat); + assertNull(descriptor.getDescriptorCompatibleWith(Version.fromString("6.8.0"))); + assertNull(descriptor.getDescriptorCompatibleWith(new SystemIndexDescriptor.MappingsVersion(TEST_MAPPINGS_NONEXISTENT_VERSION, 1))); compat = descriptor.getDescriptorCompatibleWith(Version.CURRENT.minimumCompatibilityVersion()); assertSame(descriptor, compat); @@ -291,10 +306,22 @@ public void testGetDescriptorCompatibleWith() { compat = descriptor.getDescriptorCompatibleWith(priorToMin); assertSame(prior, compat); + SystemIndexDescriptor.MappingsVersion priorToMinMappingsVersion = new SystemIndexDescriptor.MappingsVersion( + TEST_MAPPINGS_PRIOR_VERSION, + 1 + ); + compat = descriptor.getDescriptorCompatibleWith(priorToMinMappingsVersion); + assertSame(prior, compat); + compat = descriptor.getDescriptorCompatibleWith( VersionUtils.randomVersionBetween(random(), prior.getMinimumNodeVersion(), priorToMin) ); assertSame(prior, compat); + + compat = descriptor.getDescriptorCompatibleWith( + new SystemIndexDescriptor.MappingsVersion(randomIntBetween(TEST_MAPPINGS_PRIOR_VERSION, TEST_MAPPINGS_VERSION - 1), 1) + ); + assertSame(prior, compat); } public void testSystemIndicesMustBeHidden() { @@ -368,7 +395,7 @@ public void testUnmanagedIndexMappingsVersion() { // test mapping versions can't be negative public void 
testNegativeMappingsVersion() { int negativeVersion = randomIntBetween(Integer.MIN_VALUE, -1); - String mappings = String.format(Locale.ROOT, """ + String mappings = Strings.format(""" { "_doc": { "_meta": { @@ -415,20 +442,8 @@ public void testHashesIgnoreMappingMetadata() { } """; - String mappings1 = String.format( - Locale.ROOT, - mappingFormatString, - "8.9.0", - SystemIndexDescriptor.VERSION_META_KEY, - randomIntBetween(1, 10) - ); - String mappings2 = String.format( - Locale.ROOT, - mappingFormatString, - "8.10.0", - SystemIndexDescriptor.VERSION_META_KEY, - randomIntBetween(11, 20) - ); + String mappings1 = Strings.format(mappingFormatString, "8.9.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(1, 10)); + String mappings2 = Strings.format(mappingFormatString, "8.10.0", SystemIndexDescriptor.VERSION_META_KEY, randomIntBetween(11, 20)); SystemIndexDescriptor descriptor1 = priorSystemIndexDescriptorBuilder().setMappings(mappings1).build(); SystemIndexDescriptor descriptor2 = priorSystemIndexDescriptorBuilder().setMappings(mappings2).build(); diff --git a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java index b3492b203d354..925fadd511a79 100644 --- a/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/SystemIndexMappingUpdateServiceTests.java @@ -207,13 +207,16 @@ public void testManagerSkipsIndicesWithUpToDateMappings() { ); } + // TODO[wrb]: add test where we have the old mappings version but not the new one + // Is this where we "placeholder" a "distant future" version string? + /** * Check that the manager will try to upgrade indices where their mappings are out-of-date. 
*/ public void testManagerProcessesIndicesWithOutdatedMappings() { assertThat( SystemIndexMappingUpdateService.getUpgradeStatus( - markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0")))), + markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0", 4)))), DESCRIPTOR ), equalTo(UpgradeStatus.NEEDS_MAPPINGS_UPDATE) @@ -239,7 +242,7 @@ public void testManagerProcessesIndicesWithNullMetadata() { public void testManagerProcessesIndicesWithNullVersionMetadata() { assertThat( SystemIndexMappingUpdateService.getUpgradeStatus( - markShardsAvailable(createClusterState(Strings.toString(getMappings((String) null)))), + markShardsAvailable(createClusterState(Strings.toString(getMappings((String) null, null)))), DESCRIPTOR ), equalTo(UpgradeStatus.NEEDS_MAPPINGS_UPDATE) @@ -253,7 +256,7 @@ public void testManagerSubmitsPutRequest() { SystemIndices systemIndices = new SystemIndices(List.of(FEATURE)); SystemIndexMappingUpdateService manager = new SystemIndexMappingUpdateService(systemIndices, client); - manager.clusterChanged(event(markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0")))))); + manager.clusterChanged(event(markShardsAvailable(createClusterState(Strings.toString(getMappings("1.0.0", 4)))))); verify(client, times(1)).execute(any(PutMappingAction.class), any(PutMappingRequest.class), any()); } @@ -405,13 +408,13 @@ private static Settings getSettings() { } private static XContentBuilder getMappings() { - return getMappings(Version.CURRENT.toString()); + return getMappings(Version.CURRENT.toString(), 6); } - private static XContentBuilder getMappings(String version) { + private static XContentBuilder getMappings(String nodeVersion, Integer mappingsVersion) { return getMappings(builder -> builder.object("_meta", meta -> { - meta.field("version", version); - meta.field(SystemIndexDescriptor.VERSION_META_KEY, 5); + meta.field("version", nodeVersion); + meta.field(SystemIndexDescriptor.VERSION_META_KEY, 
mappingsVersion); })); } diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 9e1bcf10a8ab4..7d5098ab2a739 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -229,7 +229,7 @@ public MockIndexService indexService(Index index) { } @Override - public MockIndexShard createShard( + public void createShard( final ShardRouting shardRouting, final PeerRecoveryTargetService recoveryTargetService, final PeerRecoveryTargetService.RecoveryListener recoveryListener, @@ -238,21 +238,18 @@ public MockIndexShard createShard( final GlobalCheckpointSyncer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final DiscoveryNode targetNode, - final DiscoveryNode sourceNode + final DiscoveryNode sourceNode, + long clusterStateVersion ) throws IOException { failRandomly(); RecoveryState recoveryState = new RecoveryState(shardRouting, targetNode, sourceNode); MockIndexService indexService = indexService(recoveryState.getShardId().getIndex()); MockIndexShard indexShard = indexService.createShard(shardRouting); indexShard.recoveryState = recoveryState; - return indexShard; } @Override - public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) throws IOException, - InterruptedException { - - } + public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeValue) {} private boolean hasIndex(Index index) { return indices.containsKey(index.getUUID()); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index e547a736df034..e6223ff3e2bef 
100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -96,10 +96,10 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.snapshots.EmptySnapshotsInfoService; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.gateway.TestGatewayAllocator; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java index a18e7e8ce46f9..ecb0b4cf2d828 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoverySourceServiceTests.java @@ -43,6 +43,7 @@ public void testDuplicateRecoveries() throws IOException { PeerRecoverySourceService peerRecoverySourceService = new PeerRecoverySourceService( transportService, indicesService, + clusterService, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), mock(RecoveryPlannerService.class) ); @@ -51,6 +52,7 @@ public void testDuplicateRecoveries() throws IOException { randomAlphaOfLength(10), getFakeDiscoNode("source"), getFakeDiscoNode("target"), + 0L, Store.MetadataSnapshot.EMPTY, randomBoolean(), randomLong(), diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java 
b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java index 4305af1a1e3b4..16832aa07ccd6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -100,7 +100,7 @@ public void testWriteFileChunksConcurrently() throws Exception { final DiscoveryNode pNode = getFakeDiscoNode(sourceShard.routingEntry().currentNodeId()); final DiscoveryNode rNode = getFakeDiscoNode(targetShard.routingEntry().currentNodeId()); targetShard.markAsRecovering("test-peer-recovery", new RecoveryState(targetShard.routingEntry(), rNode, pNode)); - final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, null, null, null); + final RecoveryTarget recoveryTarget = new RecoveryTarget(targetShard, null, 0L, null, null, null); final PlainActionFuture receiveFileInfoFuture = new PlainActionFuture<>(); recoveryTarget.receiveFileInfo( mdFiles.stream().map(StoreFileMetadata::name).toList(), @@ -330,7 +330,7 @@ public void testResetStartingSeqNoIfLastCommitCorrupted() throws Exception { shard.prepareForIndexRecovery(); long startingSeqNo = shard.recoverLocallyUpToGlobalCheckpoint(); shard.store().markStoreCorrupted(new IOException("simulated")); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, null, null); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest(logger, rNode, recoveryTarget, startingSeqNo); assertThat(request.startingSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); assertThat(request.metadataSnapshot().size(), equalTo(0)); @@ -348,7 +348,7 @@ public void testMarkDoneFailureIsPropagated() throws Exception { shard.prepareForIndexRecovery(); PlainActionFuture future = PlainActionFuture.newFuture(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 
null, null, new PeerRecoveryTargetService.RecoveryListener() { + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, null, new PeerRecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { future.onResponse(null); @@ -388,7 +388,7 @@ public void testResetStartRequestIfTranslogIsCorrupted() throws Exception { shard = reinitShard(shard, ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.PeerRecoverySource.INSTANCE)); shard.markAsRecovering("peer recovery", new RecoveryState(shard.routingEntry(), pNode, rNode)); shard.prepareForIndexRecovery(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, null, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, null, null); StartRecoveryRequest request = PeerRecoveryTargetService.getStartRecoveryRequest( logger, rNode, @@ -456,7 +456,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { recoveryStateIndex.addFileDetail(storeFileMetadata.name(), storeFileMetadata.length(), false); recoveryStateIndex.setFileDetailsComplete(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, snapshotFilesProvider, () -> {}, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, snapshotFilesProvider, () -> {}, null); PlainActionFuture writeSnapshotFileFuture = PlainActionFuture.newFuture(); recoveryTarget.restoreFileFromSnapshot(repositoryName, indexId, fileInfo, writeSnapshotFileFuture); @@ -528,7 +528,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { recoveryStateIndex.addFileDetail(storeFileMetadata.name(), storeFileMetadata.length(), false); recoveryStateIndex.setFileDetailsComplete(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, snapshotFilesProvider, () -> {}, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, 
snapshotFilesProvider, () -> {}, null); String repositoryName = "repo"; IndexId indexId = new IndexId("index", "uuid"); @@ -635,7 +635,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { } }; - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, snapshotFilesProvider, () -> {}, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, snapshotFilesProvider, () -> {}, null); String[] fileNamesBeforeRecoveringSnapshotFiles = directory.listAll(); @@ -701,7 +701,7 @@ public int getReadSnapshotFileBufferSizeForRepo(String repository) { recoveryStateIndex.addFileDetail(storeFileMetadata.name(), storeFileMetadata.length(), false); recoveryStateIndex.setFileDetailsComplete(); - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, snapshotFilesProvider, () -> {}, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, snapshotFilesProvider, () -> {}, null); String repository = "repo"; IndexId indexId = new IndexId("index", "uuid"); @@ -749,7 +749,7 @@ public void testSnapshotFileDownloadPermitIsReleasedAfterClosingRecoveryTarget() Releasable snapshotFileDownloadsPermit = () -> { assertThat(snapshotFileDownloadsPermitFlag.compareAndSet(false, true), is(equalTo(true))); }; - RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, null, snapshotFileDownloadsPermit, null); + RecoveryTarget recoveryTarget = new RecoveryTarget(shard, null, 0L, null, snapshotFileDownloadsPermit, null); recoveryTarget.decRef(); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index a46cd75dfd493..d2f94ff2d344a 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -233,7 +233,7 @@ public void writeFileChunk( 
IOUtils.close(reader, store, multiFileWriter, targetStore); } - public StartRecoveryRequest getStartRecoveryRequest() throws IOException { + public StartRecoveryRequest getStartRecoveryRequest() { Store.MetadataSnapshot metadataSnapshot = randomBoolean() ? Store.MetadataSnapshot.EMPTY : new Store.MetadataSnapshot( @@ -246,6 +246,7 @@ public StartRecoveryRequest getStartRecoveryRequest() throws IOException { null, DiscoveryNodeUtils.builder("b").roles(emptySet()).build(), DiscoveryNodeUtils.builder("b").roles(emptySet()).build(), + 0L, metadataSnapshot, randomBoolean(), randomNonNegativeLong(), diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index ecb1e2ccde132..20e85c9c6fed8 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -311,7 +311,7 @@ public void testPeerRecoverySendSafeCommitInFileBased() throws Exception { } IndexShard replicaShard = newShard(primaryShard.shardId(), false); updateMappings(replicaShard, primaryShard.indexSettings().getIndexMetadata()); - recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, null, null, recoveryListener) { + recoverReplica(replicaShard, primaryShard, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, 0L, null, null, recoveryListener) { @Override public void prepareForTranslogOperations(int totalTranslogOps, ActionListener listener) { super.prepareForTranslogOperations(totalTranslogOps, listener); @@ -432,7 +432,7 @@ public long addDocument(Iterable doc) throws IOExcepti allowShardFailures(); IndexShard replica = group.addReplica(); expectThrows(Exception.class, () -> group.recoverReplica(replica, (shard, sourceNode) -> { - return new RecoveryTarget(shard, sourceNode, null, null, new PeerRecoveryTargetService.RecoveryListener() { + return new 
RecoveryTarget(shard, sourceNode, 0L, null, null, new PeerRecoveryTargetService.RecoveryListener() { @Override public void onRecoveryDone(RecoveryState state, ShardLongFieldRange timestampMillisFieldRange) { throw new AssertionError("recovery must fail"); diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java index cf98626f26e1e..47d3777573c4f 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/StartRecoveryRequestTests.java @@ -9,11 +9,10 @@ package org.elasticsearch.indices.recovery; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.seqno.SequenceNumbers; @@ -22,8 +21,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; import java.util.Collections; import static java.util.Collections.emptySet; @@ -47,6 +44,7 @@ public void testSerialization() throws Exception { UUIDs.randomBase64UUID(), DiscoveryNodeUtils.builder("a").roles(emptySet()).version(targetNodeVersion, IndexVersion.ZERO, IndexVersion.current()).build(), DiscoveryNodeUtils.builder("b").roles(emptySet()).version(targetNodeVersion, IndexVersion.ZERO, IndexVersion.current()).build(), + randomNonNegativeLong(), metadataSnapshot, randomBoolean(), randomNonNegativeLong(), @@ -54,15 +52,12 @@ public void testSerialization() throws 
Exception { randomBoolean() ); - final ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); - final OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(serializationVersion); - outRequest.writeTo(out); - - final ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); - InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); - in.setTransportVersion(serializationVersion); - final StartRecoveryRequest inRequest = new StartRecoveryRequest(in); + final StartRecoveryRequest inRequest = copyWriteable( + outRequest, + writableRegistry(), + StartRecoveryRequest::new, + serializationVersion + ); assertThat(outRequest.shardId(), equalTo(inRequest.shardId())); assertThat(outRequest.targetAllocationId(), equalTo(inRequest.targetAllocationId())); @@ -72,6 +67,12 @@ public void testSerialization() throws Exception { assertThat(outRequest.isPrimaryRelocation(), equalTo(inRequest.isPrimaryRelocation())); assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId())); assertThat(outRequest.startingSeqNo(), equalTo(inRequest.startingSeqNo())); + + if (serializationVersion.onOrAfter(TransportVersions.WAIT_FOR_CLUSTER_STATE_IN_RECOVERY_ADDED)) { + assertEquals(outRequest.clusterStateVersion(), inRequest.clusterStateVersion()); + } else { + assertEquals(0L, inRequest.clusterStateVersion()); + } } public void testDescription() { @@ -79,13 +80,14 @@ public void testDescription() { assertEquals( "recovery of [index][0] to " + node.descriptionWithoutAttributes() - + " [recoveryId=1, targetAllocationId=allocationId, startingSeqNo=-2, " + + " [recoveryId=1, targetAllocationId=allocationId, clusterStateVersion=3, startingSeqNo=-2, " + "primaryRelocation=false, canDownloadSnapshotFiles=true]", new StartRecoveryRequest( new ShardId("index", "uuid", 0), "allocationId", null, node, + 3, Store.MetadataSnapshot.EMPTY, false, 1, diff --git 
a/server/src/test/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationActionTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationActionTests.java index f591be2d3255a..eae982b083488 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationActionTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/StatelessPrimaryRelocationActionTests.java @@ -32,7 +32,8 @@ protected StatelessPrimaryRelocationAction.Request createTestInstance() { randomNonNegativeLong(), new ShardId(randomIdentifier(), UUIDs.randomBase64UUID(), randomIntBetween(0, 99)), newDiscoveryNode(), - UUIDs.randomBase64UUID() + UUIDs.randomBase64UUID(), + randomNonNegativeLong() ); } @@ -43,30 +44,41 @@ private static DiscoveryNode newDiscoveryNode() { @Override protected StatelessPrimaryRelocationAction.Request mutateInstance(StatelessPrimaryRelocationAction.Request instance) throws IOException { - return switch (between(1, 4)) { + return switch (between(1, 5)) { case 1 -> new StatelessPrimaryRelocationAction.Request( randomValueOtherThan(instance.recoveryId(), ESTestCase::randomNonNegativeLong), instance.shardId(), instance.targetNode(), - instance.targetAllocationId() + instance.targetAllocationId(), + instance.clusterStateVersion() ); case 2 -> new StatelessPrimaryRelocationAction.Request( instance.recoveryId(), ShardIdTests.mutate(instance.shardId()), instance.targetNode(), - instance.targetAllocationId() + instance.targetAllocationId(), + instance.clusterStateVersion() ); case 3 -> new StatelessPrimaryRelocationAction.Request( instance.recoveryId(), instance.shardId(), randomValueOtherThan(instance.targetNode(), StatelessPrimaryRelocationActionTests::newDiscoveryNode), - instance.targetAllocationId() + instance.targetAllocationId(), + instance.clusterStateVersion() ); case 4 -> new StatelessPrimaryRelocationAction.Request( instance.recoveryId(), instance.shardId(), instance.targetNode(), - 
randomValueOtherThan(instance.targetAllocationId(), UUIDs::randomBase64UUID) + randomValueOtherThan(instance.targetAllocationId(), UUIDs::randomBase64UUID), + instance.clusterStateVersion() + ); + case 5 -> new StatelessPrimaryRelocationAction.Request( + instance.recoveryId(), + instance.shardId(), + instance.targetNode(), + instance.targetAllocationId(), + randomValueOtherThan(instance.clusterStateVersion(), ESTestCase::randomNonNegativeLong) ); default -> throw new AssertionError("impossible"); }; diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 0a0040eebea39..87f5fc6e990f6 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; @@ -46,12 +47,12 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.TransportService; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.ContextParser; @@ -93,7 +94,17 @@ public class NodeTests extends ESTestCase { public static class CheckPlugin extends 
Plugin { - public static final BootstrapCheck CHECK = context -> BootstrapCheck.BootstrapCheckResult.success(); + public static final BootstrapCheck CHECK = new BootstrapCheck() { + @Override + public BootstrapCheckResult check(BootstrapContext context) { + return BootstrapCheck.BootstrapCheckResult.success(); + } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } + }; @Override public List getBootstrapChecks() { diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java index a6142499bc894..693b7b0fb0981 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginIntrospectorTests.java @@ -32,12 +32,12 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PrivilegedOperations; import org.elasticsearch.test.compiler.InMemoryJavaCompiler; import org.elasticsearch.test.jar.JarUtils; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index eca3afa6c5ebb..72aba521f1b79 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -705,10 +705,10 @@ public void testLoadServiceProvidersInSameClassLoader() { .instance(); // We shouldn't find the FooTestService implementation with PluginOther - 
assertThat(MockPluginsService.createExtensions(TestService.class, othPlugin), empty()); + assertThat(MockPluginsService.createExtensions(TestService.class, othPlugin, e -> false), empty()); // We should find the FooTestService implementation when we use FooPlugin, because it matches the constructor arg. - var providers = MockPluginsService.createExtensions(TestService.class, fooPlugin); + var providers = MockPluginsService.createExtensions(TestService.class, fooPlugin, e -> false); assertThat(providers, allOf(hasSize(1), everyItem(instanceOf(BarTestService.class)))); } diff --git a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java index b97a841c00118..4896f118a1327 100644 --- a/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java +++ b/server/src/test/java/org/elasticsearch/recovery/RecoveriesCollectionTests.java @@ -160,6 +160,6 @@ long startRecovery( final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId()); indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode)); indexShard.prepareForIndexRecovery(); - return collection.startRecovery(indexShard, sourceNode, null, listener, timeValue, null); + return collection.startRecovery(indexShard, sourceNode, 0L, null, listener, timeValue, null); } } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java index 78f1e6c46956e..624ad6a9fc7da 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -9,8 +9,10 @@ package org.elasticsearch.repositories.blobstore; import org.elasticsearch.action.ActionListener; +import 
org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -24,6 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexVersion; @@ -36,6 +39,7 @@ import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; +import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.repositories.ShardGeneration; import org.elasticsearch.repositories.ShardGenerations; import org.elasticsearch.repositories.SnapshotShardContext; @@ -46,6 +50,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; +import org.junit.After; import java.io.IOException; import java.nio.file.Path; @@ -55,7 +60,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CyclicBarrier; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.Function; import java.util.stream.Collectors; @@ -75,6 +82,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase { static final String REPO_TYPE = "fsLike"; + private static final String TEST_REPO_NAME = "test-repo"; protected Collection> 
getPlugins() { return Arrays.asList(FsLikeRepoPlugin.class); @@ -106,12 +114,11 @@ protected void assertSnapshotOrGenericThread() { public void testRetrieveSnapshots() throws Exception { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; logger.info("--> creating repository"); AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(Settings.builder().put(node().settings()).put("location", location)) .get(); @@ -131,7 +138,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> create first snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "test-snap-1") + .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-1") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -140,7 +147,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> create second snapshot"); createSnapshotResponse = client.admin() .cluster() - .prepareCreateSnapshot(repositoryName, "test-snap-2") + .prepareCreateSnapshot(TEST_REPO_NAME, "test-snap-2") .setWaitForCompletion(true) .setIndices(indexName) .get(); @@ -148,7 +155,7 @@ public void testRetrieveSnapshots() throws Exception { logger.info("--> make sure the node's repository can resolve the snapshots"); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(TEST_REPO_NAME); final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2); List snapshotIds = ESBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) @@ -255,13 +262,12 @@ public 
void testRepositoryDataConcurrentModificationNotAllowed() throws Exceptio public void testBadChunksize() throws Exception { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; expectThrows( RepositoryException.class, () -> client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings( Settings.builder() @@ -345,7 +351,6 @@ private static void writeIndexGen(BlobStoreRepository repository, RepositoryData private BlobStoreRepository setupRepo() { final Client client = client(); final Path location = ESIntegTestCase.randomRepoPath(node().settings()); - final String repositoryName = "test-repo"; Settings.Builder repoSettings = Settings.builder().put(node().settings()).put("location", location); boolean compress = randomBoolean(); @@ -354,20 +359,29 @@ private BlobStoreRepository setupRepo() { } AcknowledgedResponse putRepositoryResponse = client.admin() .cluster() - .preparePutRepository(repositoryName) + .preparePutRepository(TEST_REPO_NAME) .setType(REPO_TYPE) .setSettings(repoSettings) .setVerify(false) // prevent eager reading of repo data - .get(); + .get(TimeValue.timeValueSeconds(10)); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(TEST_REPO_NAME); assertThat("getBlobContainer has to be lazy initialized", repository.getBlobContainer(), nullValue()); assertEquals("Compress must be set to", compress, repository.isCompress()); return repository; } + @After + public void removeRepo() { + try { + 
client().admin().cluster().prepareDeleteRepository(TEST_REPO_NAME).get(TimeValue.timeValueSeconds(10)); + } catch (RepositoryMissingException e) { + // ok, not all tests create the test repo + } + } + private RepositoryData addRandomSnapshotsToRepoData(RepositoryData repoData, boolean inclIndices) { int numSnapshots = randomIntBetween(1, 20); for (int i = 0; i < numSnapshots; i++) { @@ -441,6 +455,32 @@ protected void snapshotFile(SnapshotShardContext context, BlobStoreIndexShardSna listenerCalled.get(); } + public void testGetRepositoryDataThreadContext() { + final var future = new PlainActionFuture(); + try (var listeners = new RefCountingListener(future)) { + final var repo = setupRepo(); + final int threads = between(1, 5); + final var barrier = new CyclicBarrier(threads); + final var headerName = "test-header"; + final var threadPool = client().threadPool(); + final var threadContext = threadPool.getThreadContext(); + for (int i = 0; i < threads; i++) { + final var headerValue = randomAlphaOfLength(10); + try (var ignored = threadContext.stashContext()) { + threadContext.putHeader(headerName, headerValue); + threadPool.generic().execute(ActionRunnable.wrap(listeners.acquire(), l -> { + safeAwait(barrier); + repo.getRepositoryData(l.map(repositoryData -> { + assertEquals(headerValue, threadContext.getHeader(headerName)); + return null; + })); + })); + } + } + } + future.actionGet(10, TimeUnit.SECONDS); + } + private Environment createEnvironment() { Path home = createTempDir(); return TestEnvironment.newEnvironment( diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index ddbc1f33cacc6..068e5933bee1e 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -35,10 +35,10 @@ import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import 
org.elasticsearch.rest.RestHandler.Route; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java index 7bdb4f7120282..204596cabdfe3 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestHttpResponseHeadersTests.java @@ -14,10 +14,10 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.rest.RestHandler.Route; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.usage.UsageService; import java.util.ArrayList; diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java index 7ac44cf862e49..7000f1a153ac6 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestValidateQueryActionTests.java @@ -27,11 +27,11 @@ import org.elasticsearch.search.AbstractSearchTestCase; import org.elasticsearch.tasks.Task; import 
org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.XContentType; diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 67a34bf4a08e9..1b5ff3f39be22 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -178,9 +178,9 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.transport.DisruptableMockTransport; import org.elasticsearch.transport.TransportService; @@ -1829,6 +1829,7 @@ protected void assertSnapshotOrGenericThread() { peerRecoverySourceService = new PeerRecoverySourceService( transportService, indicesService, + clusterService, recoverySettings, PeerOnlyRecoveryPlannerService.INSTANCE ); @@ -1961,7 +1962,6 @@ protected void assertSnapshotOrGenericThread() { repositoriesService, allocationService, metadataCreateIndexService, - new MetadataDeleteIndexService(settings, clusterService, allocationService), new IndexMetadataVerifier( settings, clusterService, diff --git a/server/src/test/java/org/elasticsearch/tasks/TaskManagerTests.java 
b/server/src/test/java/org/elasticsearch/tasks/TaskManagerTests.java index dc5f7bdfe1c5b..05150cd5dd362 100644 --- a/server/src/test/java/org/elasticsearch/tasks/TaskManagerTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/TaskManagerTests.java @@ -25,10 +25,10 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.FakeTcpChannel; import org.elasticsearch.transport.TcpChannel; import org.elasticsearch.transport.TcpTransportChannel; diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index bd4be3f3bf49c..53ad4188b6ada 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -32,12 +32,12 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.junit.After; import org.junit.Before; diff --git a/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java b/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java index 56bfe72241f28..fb4c9df512a5a 100644 --- a/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java 
+++ b/server/src/test/java/org/elasticsearch/transport/SingleResultDeduplicatorTests.java @@ -10,10 +10,20 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.SingleResultDeduplicator; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; public class SingleResultDeduplicatorTests extends ESTestCase { @@ -74,4 +84,45 @@ public void onFailure(Exception e) { assertTrue(called[i]); } } + + public void testThreadContextPreservation() { + final var resources = new Releasable[1]; + try { + final var future = new PlainActionFuture(); + try (var listeners = new RefCountingListener(future)) { + final var threadContext = new ThreadContext(Settings.EMPTY); + final var deduplicator = new SingleResultDeduplicator(threadContext, l -> l.onResponse(null)); + final var threads = between(1, 5); + final var executor = EsExecutors.newFixed( + "test", + threads, + 0, + EsExecutors.daemonThreadFactory("test"), + threadContext, + EsExecutors.TaskTrackingConfig.DO_NOT_TRACK + ); + resources[0] = () -> ThreadPool.terminate(executor, 10, TimeUnit.SECONDS); + final var barrier = new CyclicBarrier(threads); + final var headerName = "test-header"; + for (int i = 0; i < threads; i++) { + try (var ignored = threadContext.stashContext()) { + final var headerValue = randomAlphaOfLength(10); + threadContext.putHeader(headerName, headerValue); + executor.execute( + ActionRunnable.wrap( 
+ listeners.acquire(v -> assertEquals(headerValue, threadContext.getHeader(headerName))), + listener -> { + safeAwait(barrier); + deduplicator.execute(listener); + } + ) + ); + } + } + } + future.actionGet(10, TimeUnit.SECONDS); + } finally { + Releasables.closeExpectNoException(resources); + } + } } diff --git a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java index 80a06c55f2349..2ff2b655cc21a 100644 --- a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java +++ b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekTrackerPlugin.java @@ -31,8 +31,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 853fec20e6aef..c40df091a4521 100644 --- a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.IndexLongFieldRange; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata; import java.util.ArrayList; @@ -535,7 +536,7 @@ public static ClusterState state( ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); state.nodes(discoBuilder); for (DiscoveryNode node : allNodes) { - state.putTransportVersion(node.getId(), transportVersion); + state.putCompatibilityVersions(node.getId(), transportVersion, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); } Metadata.Builder metadataBuilder = Metadata.builder().generateClusterUuidIfNeeded(); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index 0466e565ad95a..8c60045b13ede 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -523,7 +523,7 @@ public static MetadataRolloverService getMetadataRolloverService( false, new IndexSettingProviders(providers) ); - MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService(clusterService, indicesService, null, registry); + MetadataIndexAliasesService indexAliasesService = new MetadataIndexAliasesService(clusterService, indicesService, registry); return new MetadataRolloverService( testThreadPool, createIndexService, diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 9d607da6a3873..c2ce750f155cf 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -451,7 +451,7 @@ public synchronized boolean removeReplica(IndexShard replica) 
throws IOException } public void recoverReplica(IndexShard replica) throws IOException { - recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, null, null, recoveryListener)); + recoverReplica(replica, (r, sourceNode) -> new RecoveryTarget(r, sourceNode, 0L, null, null, recoveryListener)); } public void recoverReplica(IndexShard replica, BiFunction targetSupplier) diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 5a6d8bb878af8..f36951cec0b84 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -751,7 +751,7 @@ protected void recoverReplica(IndexShard replica, IndexShard primary, boolean st recoverReplica( replica, primary, - (r, sourceNode) -> new RecoveryTarget(r, sourceNode, null, null, recoveryListener), + (r, sourceNode) -> new RecoveryTarget(r, sourceNode, 0L, null, null, recoveryListener), true, startReplica ); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index e97d21ae53697..fe2d6783b4b01 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -40,11 +40,11 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import 
org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.transport.TransportService; diff --git a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java index 48c5fa8c84415..686105f9ed74e 100644 --- a/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java +++ b/test/framework/src/main/java/org/elasticsearch/plugins/MockPluginsService.java @@ -23,11 +23,11 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; +import java.util.function.Predicate; public class MockPluginsService extends PluginsService { @@ -92,9 +92,10 @@ public PluginsAndModules info() { @Override @SuppressWarnings({ "rawtypes", "unchecked" }) public List loadServiceProviders(Class service) { - // We use a set here to avoid duplicates because SPIClassIterator will match + // We use a map here to avoid duplicates because SPIClassIterator will match // all plugins in MockNode, because all plugins are loaded by the same class loader. - Set result = new HashSet<>(); + // Each entry in the map is a unique service provider implementation. 
+ Map, T> result = new HashMap<>(); for (LoadedPlugin pluginTuple : plugins()) { var plugin = pluginTuple.instance(); var classLoader = plugin.getClass().getClassLoader(); @@ -105,32 +106,47 @@ public List loadServiceProviders(Class service) { var res = new ArrayList>(); SPIClassIterator.get(service, classLoader).forEachRemaining(res::add); return List.copyOf(res); - }).iterator()); + }).iterator(), result::containsKey); } else { - extension = createExtensions(service, plugin); + extension = createExtensions(service, plugin, result::containsKey); } - result.addAll(extension); + extension.forEach(e -> result.put(e.getClass(), e)); } - return List.copyOf(result); + return List.copyOf(result.values()); } /** * When we load tests with MockNode, all plugins are loaded with the same class loader, * which breaks loading service providers with our SPIClassIterator. Since all plugins are * loaded in the same class loader, we find all plugins for any class found by the SPIClassIterator - * causing us to pass wrong plugin type to createExtension. This modified createExtensions, checks for - * the type and returns an empty list if the plugin class type is incompatible. + * causing us to pass plugin types to createExtension that aren't actually part of that plugin. + * This modified createExtensions, checks for the type and returns an empty list if the + * plugin class type is incompatible. It also skips loading extension types that have already + * been loaded, so that duplicates are not created. 
*/ - static List createExtensions(Class extensionPointType, Plugin plugin) { + static List createExtensions( + Class extensionPointType, + Plugin plugin, + Predicate> loadedPredicate + ) { Iterator> classIterator = SPIClassIterator.get(extensionPointType, plugin.getClass().getClassLoader()); - return createExtensions(extensionPointType, plugin, classIterator); + return createExtensions(extensionPointType, plugin, classIterator, loadedPredicate); } - private static List createExtensions(Class extensionPointType, Plugin plugin, Iterator> classIterator) { + private static List createExtensions( + Class extensionPointType, + Plugin plugin, + Iterator> classIterator, + Predicate> loadedPredicate + ) { List extensions = new ArrayList<>(); while (classIterator.hasNext()) { Class extensionClass = classIterator.next(); + if (loadedPredicate.test(extensionClass)) { + // skip extensions that have already been loaded + continue; + } @SuppressWarnings("unchecked") Constructor[] constructors = (Constructor[]) extensionClass.getConstructors(); diff --git a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java index a9d78d36fdb96..ebc5ca4cd0fd3 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java +++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java @@ -22,8 +22,8 @@ import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.io.IOException; import java.util.HashMap; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java index 
80c1f0e700e62..fab33eca838b4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ClusterServiceUtils.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; @@ -28,14 +27,15 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.util.Collections; import java.util.concurrent.CountDownLatch; @@ -126,7 +126,7 @@ public static ClusterService createClusterService(ThreadPool threadPool, Discove clusterService.setNodeConnectionsService(createNoOpNodeConnectionsService()); ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterServiceUtils.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) - .putTransportVersion(localNode.getId(), TransportVersion.current()) + .putCompatibilityVersions(localNode.getId(), CompatibilityVersionsUtils.staticCurrent()) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); 
clusterService.getClusterApplierService().setInitialState(initialClusterState); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index bdf3a1e8b5018..540ef4cf1027b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -2035,4 +2035,8 @@ protected static boolean isTurkishLocale() { return Locale.getDefault().getLanguage().equals(new Locale("tr").getLanguage()) || Locale.getDefault().getLanguage().equals(new Locale("az").getLanguage()); } + + public static void fail(Throwable t, String msg, Object... args) { + throw new AssertionError(org.elasticsearch.common.Strings.format(msg, args), t); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java index a130ee136d0be..385778abdbc5c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java +++ b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java @@ -32,8 +32,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index e15a4e4ec3ece..5f8c6ace00246 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -17,9 +17,9 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.usage.UsageService; import org.junit.After; import org.junit.Before; diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index 0e89df35eca7a..4fcf5b64d9251 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -17,8 +17,8 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.util.Collection; import java.util.Set; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java index 43184c5a220f9..003912b32c5a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransport.java @@ -21,8 +21,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.CloseableConnection; import 
org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.RemoteTransportException; diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 43cb5b78c1f9f..65b004856ab0f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -37,10 +37,10 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.tasks.MockTaskManager; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.BytesTransportRequest; import org.elasticsearch.transport.ClusterConnectionManager; import org.elasticsearch.transport.ConnectTransportException; diff --git a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java index ff35c50891cf4..ddbea2c562db8 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/DisruptableMockTransport.java @@ -21,9 +21,9 @@ import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.Nullable; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import java.io.IOException; import java.util.ArrayList; diff --git 
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java index dbefc3989ce45..595dd48aaf32b 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/AnalyticsPlugin.java @@ -25,8 +25,8 @@ import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.analytics.action.AnalyticsInfoTransportAction; diff --git a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java index ffc2062b237fb..9d402eef3b55e 100644 --- a/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java +++ b/x-pack/plugin/async/src/main/java/org/elasticsearch/xpack/async/AsyncResultsIndexPlugin.java @@ -23,8 +23,8 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.async.AsyncTaskIndexService; diff --git 
a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java index 155c167c57d1d..cd95958e31b3e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java @@ -36,8 +36,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index ec98a7f9123c2..63fcabb98672a 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -50,10 +50,10 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java 
b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 78132749e2923..e37bd879ec943 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -433,7 +433,7 @@ protected EngineFactory getEngineFactory(ShardRouting routing) { // operations between the local checkpoint and max_seq_no which the recovering replica is waiting for. recoveryFuture = group.asyncRecoverReplica( newReplica, - (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, null, null, recoveryListener) { + (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, 0L, null, null, recoveryListener) { } ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index bbe5c57b1d00a..721b909e1d3c7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -89,8 +89,8 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; import org.elasticsearch.snapshots.sourceonly.SourceOnlySnapshotRepository; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java index b71a2e481d087..0e3f60d56843d 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlConfigVersion.java @@ -149,12 +149,14 @@ private static void checkUniqueness(int id, String uniqueId) { */ public static final MlConfigVersion V_10 = registerMlConfigVersion(10_00_00_99, "4B940FD9-BEDD-4589-8E08-02D9B480B22D"); + // V_11 is used in ELSER v2 package configs + public static final MlConfigVersion V_11 = registerMlConfigVersion(11_00_00_99, "79CB2950-57C7-11EE-AE5D-0800200C9A66"); /** * Reference to the most recent Ml config version. * This should be the Ml config version with the highest id. */ - public static final MlConfigVersion CURRENT = V_10; + public static final MlConfigVersion CURRENT = V_11; /** * Reference to the first MlConfigVersion that is detached from the diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index b00dcfd731aee..ad9ab7088fef5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -62,11 +62,11 @@ public class StartTrainedModelDeploymentAction extends ActionType DEFAULT_ALLOWED_SIGNATURE_ALGORITHMS = Collections.singletonList("RS256"); private static final boolean DEFAULT_POPULATE_USER_METADATA = true; private static final TimeValue DEFAULT_JWT_CACHE_TTL = TimeValue.timeValueMinutes(20); + private static final TimeValue DEFAULT_JWT_CLIENT_AUTH_GRACE_PERIOD = TimeValue.timeValueMinutes(1); private static final int DEFAULT_JWT_CACHE_SIZE = 100_000; private static final int MIN_JWT_CACHE_SIZE = 0; private static final TimeValue DEFAULT_HTTP_CONNECT_TIMEOUT = TimeValue.timeValueSeconds(5); @@ -172,7 
+173,8 @@ private static Set> getNonSecureSettings() { CLAIMS_MAIL.getPattern(), CLAIMS_NAME.getClaim(), CLAIMS_NAME.getPattern(), - POPULATE_USER_METADATA + POPULATE_USER_METADATA, + CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD ) ); // JWT Client settings @@ -355,6 +357,12 @@ public Iterator> settings() { "client_authentication.shared_secret" ); + public static final Setting.AffixSetting CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD = Setting.affixKeySetting( + RealmSettings.realmSettingPrefix(TYPE), + "client_authentication.rotation_grace_period", + key -> Setting.timeSetting(key, DEFAULT_JWT_CLIENT_AUTH_GRACE_PERIOD, Setting.Property.NodeScope) + ); + // Individual Cache settings public static final Setting.AffixSetting JWT_CACHE_TTL = Setting.affixKeySetting( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java index 2739f64986439..652d6815eea46 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java @@ -165,8 +165,9 @@ public class InternalUsers { ForceMergeAction.NAME + "*", // indices stats is used by rollover, so we need to grant it here IndicesStatsAction.NAME + "*", - UpdateSettingsAction.NAME - // Down-sampling related actions are not granted here because down-sampling is not supported for system data streams + UpdateSettingsAction.NAME, + DownsampleAction.NAME, + AddIndexBlockAction.NAME ) .allowRestrictedIndices(true) .build() }, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TransportTLSBootstrapCheck.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TransportTLSBootstrapCheck.java index 5899736481884..5c5d556181343 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TransportTLSBootstrapCheck.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/TransportTLSBootstrapCheck.java @@ -8,6 +8,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.xpack.core.XPackSettings; /** @@ -27,4 +28,9 @@ public BootstrapCheckResult check(BootstrapContext context) { } return BootstrapCheckResult.success(); } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_TLS; + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 37358c483a749..d9495afebcab8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -90,9 +90,9 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportInterceptor; import org.elasticsearch.watcher.ResourceWatcherService; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java index 36562474d036b..6603353e967ea 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java @@ -254,7 +254,9 @@ public void testDataStreamLifecycleUser() { DeleteIndexAction.NAME, ForceMergeAction.NAME, IndicesStatsAction.NAME, - UpdateSettingsAction.NAME + UpdateSettingsAction.NAME, + DownsampleAction.NAME, + AddIndexBlockAction.NAME ); final String dataStream = randomAlphaOfLengthBetween(3, 12); checkIndexAccess(role, randomFrom(sampleIndexActions), dataStream, true); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java index 3cbcfe6c074c3..d4500d9439329 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/action/RestTermsEnumActionTests.java @@ -22,12 +22,12 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java index e094ce30e2f8f..117c7d1265234 100644 --- 
a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/Deprecation.java @@ -29,8 +29,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.deprecation.logging.DeprecationCacheResetAction; diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 60cc7c847b5e3..dcb9e53f1a671 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -35,8 +35,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java index e7cf7e9a7e489..19ad17d517cc9 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java +++ 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/EnterpriseSearch.java @@ -35,8 +35,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.application.analytics.AnalyticsTemplateRegistry; diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/LocalStateEnterpriseSearch.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/LocalStateEnterpriseSearch.java index 1ebe8a644a73f..d137f6e719ee3 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/LocalStateEnterpriseSearch.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/LocalStateEnterpriseSearch.java @@ -30,8 +30,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java index f7417f31943e5..3a18983c8d5a5 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlPlugin.java @@ -34,8 +34,8 @@ import 
org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackPlugin; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java index d3ed8da1a17b0..3f2632d9a643f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/UnsupportedValueSource.java @@ -8,9 +8,9 @@ package org.elasticsearch.compute.lucene; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Rounding; import org.elasticsearch.index.fielddata.DocValueBits; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -20,8 +20,7 @@ public class UnsupportedValueSource extends ValuesSource { - public static final String UNSUPPORTED_OUTPUT = ""; - private static final BytesRef result = new BytesRef(UNSUPPORTED_OUTPUT); + public static final String UNSUPPORTED_OUTPUT = null; private final ValuesSource originalSource; public UnsupportedValueSource(ValuesSource originalSource) { @@ -37,22 +36,7 @@ public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOExc // ignore and fall back to UNSUPPORTED_OUTPUT } } - return new SortedBinaryDocValues() { - @Override - public boolean 
advanceExact(int doc) throws IOException { - return true; - } - - @Override - public int docValueCount() { - return 1; - } - - @Override - public BytesRef nextValue() throws IOException { - return result; - } - }; + return FieldData.emptySortedBinary(); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java index 7f852b3c6908b..d1d68df52362c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValueSources.java @@ -7,10 +7,16 @@ package org.elasticsearch.compute.lucene; +import org.apache.lucene.index.DocValues; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.index.fielddata.SourceValueFetcherSortedBinaryIndexFieldData; import org.elasticsearch.index.fielddata.StoredFieldSortedBinaryIndexFieldData; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -24,6 +30,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -71,19 +78,9 @@ public static List sources( try { fieldData = ctx.getForField(fieldType, MappedFieldType.FielddataOperation.SEARCH); } catch (IllegalArgumentException e) { - if (asUnsupportedSource) { - sources.add( - new ValueSourceInfo( - new 
UnsupportedValueSourceType(fieldType.typeName()), - new UnsupportedValueSource(null), - elementType, - ctx.getIndexReader() - ) - ); - continue; - } else { - throw e; - } + sources.add(unsupportedValueSource(elementType, ctx, fieldType, e)); + HeaderWarning.addWarning("Field [{}] cannot be retrieved, it is unsupported or not indexed; returning null", fieldName); + continue; } var fieldContext = new FieldContext(fieldName, fieldData, fieldType); var vsType = fieldData.getValuesSourceType(); @@ -106,6 +103,56 @@ public static List sources( return sources; } + private static ValueSourceInfo unsupportedValueSource( + ElementType elementType, + SearchExecutionContext ctx, + MappedFieldType fieldType, + IllegalArgumentException e + ) { + return switch (elementType) { + case BYTES_REF -> new ValueSourceInfo( + new UnsupportedValueSourceType(fieldType.typeName()), + new UnsupportedValueSource(null), + elementType, + ctx.getIndexReader() + ); + case LONG, INT -> new ValueSourceInfo( + CoreValuesSourceType.NUMERIC, + ValuesSource.Numeric.EMPTY, + elementType, + ctx.getIndexReader() + ); + case BOOLEAN -> new ValueSourceInfo( + CoreValuesSourceType.BOOLEAN, + ValuesSource.Numeric.EMPTY, + elementType, + ctx.getIndexReader() + ); + case DOUBLE -> new ValueSourceInfo(CoreValuesSourceType.NUMERIC, new ValuesSource.Numeric() { + @Override + public boolean isFloatingPoint() { + return true; + } + + @Override + public SortedNumericDocValues longValues(LeafReaderContext context) { + return DocValues.emptySortedNumeric(); + } + + @Override + public SortedNumericDoubleValues doubleValues(LeafReaderContext context) throws IOException { + return org.elasticsearch.index.fielddata.FieldData.emptySortedNumericDoubles(); + } + + @Override + public SortedBinaryDocValues bytesValues(LeafReaderContext context) throws IOException { + return org.elasticsearch.index.fielddata.FieldData.emptySortedBinary(); + } + }, elementType, ctx.getIndexReader()); + default -> throw e; + }; + } + private 
static TextValueSource textValueSource(SearchExecutionContext ctx, MappedFieldType fieldType) { if (fieldType.isStored()) { IndexFieldData fieldData = new StoredFieldSortedBinaryIndexFieldData( diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 6db4a8c4fe37d..08e793e43c612 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -22,7 +22,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -34,6 +33,7 @@ import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.Transports; import java.io.IOException; import java.util.Map; @@ -73,7 +73,7 @@ public ExchangeService(Settings settings, ThreadPool threadPool, String executor this.requestExecutorName = executorName; this.responseExecutor = threadPool.executor(executorName); final var inactiveInterval = settings.getAsTime(INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMinutes(5)); - this.inactiveSinksReaper = new InactiveSinksReaper(LOGGER, threadPool, inactiveInterval); + this.inactiveSinksReaper = new InactiveSinksReaper(LOGGER, threadPool, this.responseExecutor, inactiveInterval); } public void registerTransportHandler(TransportService transportService) { @@ -211,8 +211,8 @@ public void 
messageReceived(ExchangeRequest request, TransportChannel channel, T } private final class InactiveSinksReaper extends AbstractAsyncTask { - InactiveSinksReaper(Logger logger, ThreadPool threadPool, TimeValue interval) { - super(logger, threadPool, EsExecutors.DIRECT_EXECUTOR_SERVICE, interval, true); + InactiveSinksReaper(Logger logger, ThreadPool threadPool, Executor executor, TimeValue interval) { + super(logger, threadPool, executor, interval, true); rescheduleIfNecessary(); } @@ -224,6 +224,8 @@ protected boolean mustReschedule() { @Override protected void runInternal() { + assert Transports.assertNotTransportThread("reaping inactive exchanges can be expensive"); + assert ThreadPool.assertNotScheduleThread("reaping inactive exchanges can be expensive"); final TimeValue maxInterval = getInterval(); final long nowInMillis = threadPool.relativeTimeInMillis(); for (Map.Entry e : sinks.entrySet()) { diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml index a72205b3af064..d72d09644a128 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_tsdb.yml @@ -1,4 +1,6 @@ setup: + - skip: + features: allowed_warnings_regex - do: indices.create: index: test @@ -84,6 +86,8 @@ load everything: --- load a document: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test | where @timestamp == "2021-04-28T18:50:23.142Z"' @@ -93,8 +97,8 @@ load a document: - match: {values.0.0: "2021-04-28T18:50:23.142Z"} - match: {values.0.1: "10.10.55.3"} - match: {values.0.2: "dog"} - - match: {values.0.3: ""} - - match: {values.0.4: ""} + - match: {values.0.3: null } + - match: 
{values.0.4: null } - match: {values.0.5: "df3145b3-0563-4d3b-a0f7-897eb2876ea9"} - match: {values.0.6: "pod"} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml index 52f7460ea727e..44af9559598ab 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/40_unsupported_types.yml @@ -1,5 +1,6 @@ ---- -unsupported: +setup: + - skip: + features: allowed_warnings_regex - do: indices.create: index: test @@ -98,10 +99,15 @@ unsupported: "some_doc": { "foo": "xy", "bar": 12 } } +--- +unsupported: - do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" esql.query: body: query: 'from test' + - match: { columns.0.name: aggregate_metric_double } - match: { columns.0.type: unsupported } - match: { columns.1.name: binary } @@ -158,29 +164,29 @@ unsupported: - match: { columns.26.type: integer } - length: { values: 1 } - - match: { values.0.0: "" } - - match: { values.0.1: "" } - - match: { values.0.2: "" } - - match: { values.0.3: "" } - - match: { values.0.4: "" } - - match: { values.0.5: "" } - - match: { values.0.6: "" } - - match: { values.0.7: "" } - - match: { values.0.8: "" } - - match: { values.0.9: "" } - - match: { values.0.10: "" } - - match: { values.0.11: "" } - - match: { values.0.12: "" } - - match: { values.0.13: "" } + - match: { values.0.0: null } + - match: { values.0.1: null } + - match: { values.0.2: null } + - match: { values.0.3: null } + - match: { values.0.4: null } + - match: { values.0.5: null } + - match: { values.0.6: null } + - match: { values.0.7: null } + - match: { values.0.8: null } + - match: { values.0.9: null } + - match: { 
values.0.10: null } + - match: { values.0.11: null } + - match: { values.0.12: null } + - match: { values.0.13: null } - match: { values.0.14: "foo bar baz" } - match: { values.0.15: Alice } - - match: { values.0.16: "" } - - match: { values.0.17: "" } - - match: { values.0.18: "" } - - match: { values.0.19: "" } - - match: { values.0.20: "" } - - match: { values.0.21: "" } - - match: { values.0.22: "" } + - match: { values.0.16: null } + - match: { values.0.17: null } + - match: { values.0.18: null } + - match: { values.0.19: null } + - match: { values.0.20: null } + - match: { values.0.21: null } + - match: { values.0.22: null } - match: { values.0.23: 12 } - match: { values.0.24: xy } - match: { values.0.25: "foo bar" } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml index 06d2b5e461822..91ff5ddc7cbe9 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/70_locale.yml @@ -26,7 +26,7 @@ setup: - do: esql.query: body: - query: 'FROM events | eval fixed_format = date_format(@timestamp, "MMMM"), variable_format = date_format(@timestamp, format) | sort @timestamp | keep @timestamp, fixed_format, variable_format' + query: 'FROM events | eval fixed_format = date_format("MMMM", @timestamp), variable_format = date_format(format, @timestamp) | sort @timestamp | keep @timestamp, fixed_format, variable_format' - match: { columns.0.name: "@timestamp" } - match: { columns.0.type: "date" } @@ -45,7 +45,7 @@ setup: - do: esql.query: body: - query: 'FROM events | eval fixed_format = date_format(@timestamp, "MMMM"), variable_format = date_format(@timestamp, format) | sort @timestamp | keep @timestamp, fixed_format, variable_format' + query: 'FROM events | eval 
fixed_format = date_format("MMMM", @timestamp), variable_format = date_format(format, @timestamp) | sort @timestamp | keep @timestamp, fixed_format, variable_format' locale: "it-IT" - match: { columns.0.name: "@timestamp" } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml new file mode 100644 index 0000000000000..53dc5bab6df46 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/90_non_indexed.yml @@ -0,0 +1,146 @@ +setup: + - skip: + features: allowed_warnings_regex + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 5 + mappings: + properties: + boolean: + type: boolean + boolean_noidx: + type: boolean + index: false + doc_values: false + date: + type: date + date_noidx: + type: date + index: false + doc_values: false + double: + type: double + double_noidx: + type: double + index: false + doc_values: false + float: + type: float + float_noidx: + type: float + index: false + doc_values: false + integer: + type: integer + integer_noidx: + type: integer + index: false + doc_values: false + ip: + type: ip + ip_noidx: + type: ip + index: false + doc_values: false + keyword: + type: keyword + keyword_noidx: + type: keyword + index: false + doc_values: false + long: + type: long + long_noidx: + type: long + index: false + doc_values: false + + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { + "keyword": "foo", + "keyword_noidx": "foo", + "boolean": true, + "boolean_noidx": true, + "integer": 10, + "integer_noidx": 10, + "long": 20, + "long_noidx": 20, + "float": 30, + "float_noidx": 30, + "double": 40, + "double_noidx": 40, + "date": "2021-04-28T18:50:04.467Z", + "date_noidx": "2021-04-28T18:50:04.467Z", + "ip": "192.168.0.1", + "ip_noidx": "192.168.0.1" + } + +--- +unsupported: + 
- do: + allowed_warnings_regex: + - "Field \\[.*\\] cannot be retrieved, it is unsupported or not indexed; returning null" + esql.query: + body: + query: 'from test' + + - match: { columns.0.name: boolean } + - match: { columns.0.type: boolean } + - match: { columns.1.name: boolean_noidx } + - match: { columns.1.type: boolean } + - match: { columns.2.name: date } + - match: { columns.2.type: date } + - match: { columns.3.name: date_noidx } + - match: { columns.3.type: date } + - match: { columns.4.name: double } + - match: { columns.4.type: double } + - match: { columns.5.name: double_noidx } + - match: { columns.5.type: double } + - match: { columns.6.name: float } + - match: { columns.6.type: double } + - match: { columns.7.name: float_noidx } + - match: { columns.7.type: double } + - match: { columns.8.name: integer } + - match: { columns.8.type: integer } + - match: { columns.9.name: integer_noidx } + - match: { columns.9.type: integer } + - match: { columns.10.name: ip } + - match: { columns.10.type: ip } + - match: { columns.11.name: ip_noidx } + - match: { columns.11.type: ip } + - match: { columns.12.name: keyword } + - match: { columns.12.type: keyword } + - match: { columns.13.name: keyword_noidx } + - match: { columns.13.type: keyword } + - match: { columns.14.name: long } + - match: { columns.14.type: long } + - match: { columns.15.name: long_noidx } + - match: { columns.15.type: long } + + - length: { values: 1 } + + - match: { values.0.0: true } + - match: { values.0.1: null } + - match: { values.0.2: "2021-04-28T18:50:04.467Z" } + - match: { values.0.3: null } + - match: { values.0.4: 40 } + - match: { values.0.5: null } + - match: { values.0.6: 30 } + - match: { values.0.7: null } + - match: { values.0.8: 10 } + - match: { values.0.9: null } + - match: { values.0.10: "192.168.0.1" } + - match: { values.0.11: null } + - match: { values.0.12: "foo" } + - match: { values.0.13: "foo" } # this is a special case, ESQL can retrieve keywords from source + - 
match: { values.0.14: 20 } + - match: { values.0.15: null } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 7d90cf47cae09..82c5f65d210ce 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -336,7 +336,47 @@ public void testErrorMessageForInvalidTypeInParams() throws IOException { ResponseException.class, () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"byte\", \"value\": 5}]").build()) ); - assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("illegal data type [byte]")); + assertThat( + EntityUtils.toString(re.getResponse().getEntity()), + containsString("EVAL does not support type [byte] in expression [?]") + ); + } + + public void testErrorMessageForLiteralDateMathOverflow() throws IOException { + List datePeriodOverflowExpressions = List.of( + "2147483647 day + 1 day", + "306783378 week + 1 week", + "2147483647 year + 1 year" + ); + // We cannot easily force an overflow using just milliseconds, since these are divided by 1000 and then the resulting seconds are + // stored in a long. But combining with seconds works. 
+ List timeDurationOverflowExpressions = List.of( + "9223372036854775807 second + 1000 millisecond", + "9223372036854775807 second + 1 second", + "153722867280912930 minute + 1 minute", + "2562047788015215 hour + 1 hour" + ); + + for (String overflowExp : datePeriodOverflowExpressions) { + assertDateMathOverflow(overflowExp, "integer overflow"); + } + for (String overflowExp : timeDurationOverflowExpressions) { + assertDateMathOverflow(overflowExp, "long overflow"); + } + } + + private static void assertDateMathOverflow(String overflowExpression, String expectedOverflowMessage) throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = now() + (" + overflowExpression + ")").build()) + ); + + String responseMessage = EntityUtils.toString(re.getResponse().getEntity()); + assertThat(responseMessage, containsString("arithmetic exception in expression [" + overflowExpression + "]:")); + // The second part of the error message might come after a newline, so we check for it separately. 
+ assertThat(responseMessage, containsString("[" + expectedOverflowMessage + "]")); + + assertThat(re.getResponse().getStatusLine().getStatusCode(), equalTo(400)); } public void testErrorMessageForArrayValuesInParams() throws IOException { diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec index f670738bd3c49..6ddc9601db4ac 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/blog-ignoreCsvTests.csv-spec @@ -2,7 +2,7 @@ FROM employees | WHERE still_hired == true -| EVAL hired = DATE_FORMAT(hire_date, "YYYY") +| EVAL hired = DATE_FORMAT("YYYY", hire_date) | STATS avg_salary = AVG(salary) BY languages | EVAL avg_salary = ROUND(avg_salary) | EVAL lang_code = TO_STRING(languages) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index e04e870da7713..5adecec275682 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -45,7 +45,7 @@ emp_no:integer | x:date evalDateFormat -from employees | sort hire_date | eval x = date_format(hire_date), y = date_format(hire_date, "YYYY-MM-dd") | keep emp_no, x, y | limit 5; +from employees | sort hire_date | eval x = date_format(hire_date), y = date_format("YYYY-MM-dd", hire_date) | keep emp_no, x, y | limit 5; emp_no:integer | x:keyword | y:keyword 10009 | 1985-02-18T00:00:00.000Z | 1985-02-18 @@ -295,7 +295,7 @@ hire_date:date | hd:date ; now -row a = now() | eval x = a == now(), y = substring(date_format(a, "yyyy"), 0, 2) | keep x, y; +row a = now() | eval x = a == now(), y = substring(date_format("yyyy", a), 0, 2) | keep x, y; x:boolean | y:keyword true | 20 @@ -338,14 +338,14 @@ AVG(salary):double | 
bucket:date ; evalDateParseWithSimpleDate -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM-dd") | keep b; +row a = "2023-02-01" | eval b = date_parse("yyyy-MM-dd", a) | keep b; b:datetime 2023-02-01T00:00:00.000Z ; evalDateParseWithDateTime -row a = "2023-02-01 12:15:55" | eval b = date_parse(a, "yyyy-MM-dd HH:mm:ss") | keep b; +row a = "2023-02-01 12:15:55" | eval b = date_parse("yyyy-MM-dd HH:mm:ss", a) | keep b; b:datetime 2023-02-01T12:15:55.000Z @@ -359,8 +359,8 @@ b:datetime ; evalDateParseWrongDate -row a = "2023-02-01 foo" | eval b = date_parse(a, "yyyy-MM-dd") | keep b; -warning:Line 1:37: evaluation of [date_parse(a, \"yyyy-MM-dd\")] failed, treating result as null. Only first 20 failures recorded. +row a = "2023-02-01 foo" | eval b = date_parse("yyyy-MM-dd", a) | keep b; +warning:Line 1:37: evaluation of [date_parse(\"yyyy-MM-dd\", a)] failed, treating result as null. Only first 20 failures recorded. warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01 foo] with format [yyyy-MM-dd] b:datetime @@ -368,16 +368,16 @@ null ; evalDateParseNotMatching -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM") | keep b; -warning:Line 1:33: evaluation of [date_parse(a, \"yyyy-MM\")] failed, treating result as null. Only first 20 failures recorded. +row a = "2023-02-01" | eval b = date_parse("yyyy-MM", a) | keep b; +warning:Line 1:33: evaluation of [date_parse(\"yyyy-MM\", a)] failed, treating result as null. Only first 20 failures recorded. warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01] with format [yyyy-MM] b:datetime null ; evalDateParseNotMatching2 -row a = "2023-02-01" | eval b = date_parse(a, "yyyy-MM-dd HH:mm:ss") | keep b; -warning:Line 1:33: evaluation of [date_parse(a, \"yyyy-MM-dd HH:mm:ss\")] failed, treating result as null. Only first 20 failures recorded. 
+row a = "2023-02-01" | eval b = date_parse("yyyy-MM-dd HH:mm:ss", a) | keep b; +warning:Line 1:33: evaluation of [date_parse(\"yyyy-MM-dd HH:mm:ss\", a)] failed, treating result as null. Only first 20 failures recorded. warning:java.lang.IllegalArgumentException: failed to parse date field [2023-02-01] with format [yyyy-MM-dd HH:mm:ss] b:datetime @@ -385,7 +385,7 @@ null ; evalDateParseNullPattern -row a = "2023-02-01" | eval b = date_parse(a, null) | keep b; +row a = "2023-02-01" | eval b = date_parse(null, a) | keep b; b:datetime null @@ -393,8 +393,8 @@ null evalDateParseDynamic from employees | where emp_no == 10039 or emp_no == 10040 | sort emp_no -| eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; +| eval birth_date_string = date_format("yyyy-MM-dd", birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean 10039 | 1959-10-01 | 1959-10-01 | true @@ -403,8 +403,8 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean evalDateParseDynamic2 from employees | where emp_no >= 10047 | sort emp_no | where emp_no <= 10051 -| eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") +| eval birth_date_string = date_format("yyyy-MM-dd", birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | keep emp_no, new_date, birth_date | eval bool = new_date == birth_date; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean @@ -418,8 +418,8 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool:boo evalDateParseDynamicDateAndPattern from employees | where emp_no == 10049 or emp_no == 10050 | sort emp_no -| eval pattern = 
"yyyy-MM-dd", birth_date_string = date_format(birth_date, pattern) -| eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; +| eval pattern = "yyyy-MM-dd", birth_date_string = date_format(pattern, birth_date) +| eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool; emp_no:integer | new_date:datetime | birth_date:datetime | bool:boolean 10049 | null | null | null @@ -437,7 +437,7 @@ emp_no:integer | new_date:datetime | birth_date:datetime | bool: dateFields from employees | where emp_no == 10049 or emp_no == 10050 -| eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year"), day = date_extract(birth_date, "day_of_month") +| eval year = date_extract("year", birth_date), month = date_extract("month_of_year", birth_date), day = date_extract("day_of_month", birth_date) | keep emp_no, year, month, day; ignoreOrder:true @@ -449,7 +449,7 @@ emp_no:integer | year:long | month:long | day:long dateFormatLocale from employees | where emp_no == 10049 or emp_no == 10050 | sort emp_no -| eval birth_month = date_format(birth_date, "MMMM") | keep emp_no, birth_date, birth_month; +| eval birth_month = date_format("MMMM", birth_date) | keep emp_no, birth_date, birth_month; ignoreOrder:true emp_no:integer | birth_date:datetime | birth_month:keyword @@ -465,12 +465,27 @@ dt:datetime |plus:datetime 2100-01-01T01:01:01.000Z |2104-04-16T01:01:01.000Z ; -datePlusDuration -row dt = to_dt("2100-01-01T00:00:00.000Z") -| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; +datePlusPeriodFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = 4 years + 3 months + 2 weeks + 1 day + n | keep then; -dt:datetime |plus:datetime -2100-01-01T00:00:00.000Z |2100-01-01T01:01:01.001Z +then:datetime +1957-07-19T00:00:00.000Z +; + +datePlusMixedPeriodsFromLeft +row n = 
to_dt("1953-04-01T00:00:00.000Z") +| eval then = 4 years + 3 months + 1 year + 2 weeks + 1 month + 1 day + 1 week + 1 day + n +| keep then; + +then:datetime +1958-08-24T00:00:00.000Z +; + +datePlusSumOfPeriodsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = (4 years + 3 months + 2 weeks + 1 day) + n | keep then; + +then:datetime +1957-07-19T00:00:00.000Z ; dateMinusPeriod @@ -481,6 +496,61 @@ dt:datetime |minus:datetime 2104-04-16T01:01:01.000Z |2100-01-01T01:01:01.000Z ; +dateMinusPeriodFromLeft +row n = to_dt("1957-07-19T00:00:00.000Z") | eval then = -4 years - 3 months - 2 weeks - 1 day + n | keep then; + +then:datetime +1953-04-04T00:00:00.000Z +; + +dateMinusSumOfNegativePeriods +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = n - (-4 years - 3 months - 2 weeks - 1 day)| keep then; + +then:datetime +1957-07-19T00:00:00.000Z +; + +dateMinusPeriodsFromLeftMultipleEvals +row n = to_dt("1953-04-04T00:00:00.000Z") +| eval x = -4 years + n +| eval y = -3 months + x, then = y + (-2 weeks - 1 day) +| keep then; + +then:datetime +1948-12-20T00:00:00.000Z +; + +datePlusDuration +row dt = to_dt("2100-01-01T00:00:00.000Z") +| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:datetime |plus:datetime +2100-01-01T00:00:00.000Z |2100-01-01T01:01:01.001Z +; + +datePlusDurationFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = 1 hour + 1 minute + 1 second + 1 milliseconds + n | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + +datePlusMixedDurationsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") +| eval then = 1 hour + 1 minute + 2 hour + 1 second + 2 minute + 1 milliseconds + 2 second + 2 millisecond + n +| keep then; + +then:datetime +1953-04-04T03:03:03.003Z +; + +datePlusSumOfDurationsFromLeft +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = (1 hour + 1 minute + 1 second + 1 milliseconds) + n | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + dateMinusDuration row dt = 
to_dt("2100-01-01T01:01:01.001Z") | eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds; @@ -489,6 +559,31 @@ dt:datetime |minus:datetime 2100-01-01T01:01:01.001Z |2100-01-01T00:00:00.000Z ; +dateMinusDurationFromLeft +row n = to_dt("1953-04-04T01:01:01.001Z") | eval then = -1 hour - 1 minute - 1 second - 1 milliseconds + n | keep then; + +then:datetime +1953-04-04T00:00:00.000Z +; + +dateMinusSumOfNegativeDurations +row n = to_dt("1953-04-04T00:00:00.000Z") | eval then = n - (-1 hour - 1 minute - 1 second - 1 milliseconds) | keep then; + +then:datetime +1953-04-04T01:01:01.001Z +; + +dateMinusDurationsFromLeftMultipleEvals +row n = to_dt("1953-04-04T04:03:02.001Z") +| eval x = -4 hour + n +| eval y = -3 minute + x, then = y + (-2 second - 1 millisecond) +| keep then +; + +then:datetime +1953-04-04T00:00:00.000Z +; + datePlusPeriodAndDuration row dt = to_dt("2100-01-01T00:00:00.000Z") | eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 137820e695892..d1aa4dd811df3 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -233,7 +233,7 @@ avg_lang:double | max_lang:integer docsStatsGroupByMultipleValues // tag::statsGroupByMultipleValues[] FROM employees -| EVAL hired = DATE_FORMAT(hire_date, "YYYY") +| EVAL hired = DATE_FORMAT("YYYY", hire_date) | STATS avg_salary = AVG(salary) BY hired, languages.long | EVAL avg_salary = ROUND(avg_salary) | SORT hired, languages.long @@ -293,8 +293,8 @@ Uri |Lenart |1.75 dateExtract // tag::dateExtract[] -ROW date = DATE_PARSE("2022-05-06", "yyyy-MM-dd") -| EVAL year = DATE_EXTRACT(date, "year") +ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06") +| EVAL year = DATE_EXTRACT("year", date) // end::dateExtract[] ; @@ 
-404,7 +404,7 @@ Saniya |Kalloufi |2.1 |6.9 dateParse // tag::dateParse[] ROW date_string = "2022-05-06" -| EVAL date = DATE_PARSE(date_string, "yyyy-MM-dd") +| EVAL date = DATE_PARSE("yyyy-MM-dd", date_string) // end::dateParse[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json index b650cb7e64564..9ce87d01bfbb9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json @@ -28,6 +28,11 @@ "type": "keyword" } } + }, + "long_noidx": { + "type": "long", + "index": false, + "doc_values": false } } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index da559485d17ff..89e4c3b2b8174 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -322,7 +322,7 @@ c:long | g:keyword | tws:long ; byStringAndString -from employees | eval hire_year_str = date_format(hire_date, "yyyy") | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5; +from employees | eval hire_year_str = date_format("yyyy", hire_date) | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5; c:long | gender:keyword | hire_year_str:keyword 8 | F | 1989 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec index 2af60793f3416..462045d9968ee 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_count_distinct.csv-spec @@ -68,7 +68,7 @@ c:long ; 
countDistinctOfKeywords -from employees | eval hire_year_str = date_format(hire_date, "yyyy") | stats g = count_distinct(gender), h = count_distinct(hire_year_str); +from employees | eval hire_year_str = date_format("yyyy", hire_date) | stats g = count_distinct(gender), h = count_distinct(hire_year_str); g:long | h:long 2 | 14 diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java index bdbcd9a548f58..76c874b0fe63d 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java @@ -88,7 +88,7 @@ public void testBoolean() throws InterruptedException, IOException { public void testDate() throws InterruptedException, IOException { createIndexWithConstRuntimeField("date"); EsqlQueryResponse response = run(""" - from test | eval d=date_format(const, "yyyy") | stats min (foo) by d"""); + from test | eval d=date_format("yyyy", const) | stats min (foo) by d"""); assertThat(getValuesList(response), equalTo(List.of(List.of(0L, "2023")))); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index 4c79b2453f8e3..1f5e6ee3fd6ed 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -172,6 +172,8 @@ else if (p.resolved()) { } } else if (p instanceof Row row) { failures.addAll(validateRow(row)); + } else if (p instanceof Eval eval) { + failures.addAll(validateEval(eval)); } p.forEachExpression(BinaryOperator.class, bo -> { @@ -236,6 +238,19 @@ 
private static Collection validateRow(Row row) { return failures; } + private static Collection validateEval(Eval eval) { + List failures = new ArrayList<>(eval.fields().size()); + eval.fields().forEach(field -> { + DataType dataType = field.dataType(); + if (EsqlDataTypes.isRepresentable(dataType) == false) { + failures.add( + fail(field, "EVAL does not support type [{}] in expression [{}]", dataType.typeName(), field.child().sourceText()) + ); + } + }); + return failures; + } + /** * Limit QL's comparisons to types we support. */ diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java index c7c923e8e912a..455c9d162dc8a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java @@ -17,6 +17,9 @@ import java.time.ZoneId; import java.time.ZoneOffset; import java.util.Objects; +import java.util.function.Predicate; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; public abstract class BinaryDateTimeFunction extends BinaryScalarFunction { @@ -66,4 +69,12 @@ public boolean equals(Object o) { BinaryDateTimeFunction that = (BinaryDateTimeFunction) o; return zoneId().equals(that.zoneId()); } + + // TODO: drop check once 8.11 is released + static TypeResolution argumentTypesAreSwapped(DataType left, DataType right, Predicate rightTest, String source) { + if (DataTypes.isDateTime(left) && rightTest.test(right)) { + return new TypeResolution(format(null, "function definition has been updated, please swap arguments in [{}]", source)); + } + return TypeResolution.TYPE_RESOLVED; + } } diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java index 66dbb1dd33901..1b33d5829e472 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java @@ -31,6 +31,7 @@ import java.util.Locale; import java.util.function.Function; +import static org.elasticsearch.xpack.esql.expression.function.scalar.date.BinaryDateTimeFunction.argumentTypesAreSwapped; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isStringAndExact; @@ -38,22 +39,22 @@ public class DateExtract extends ConfigurationFunction implements EvaluatorMappe private ChronoField chronoField; - public DateExtract(Source source, Expression field, Expression chronoFieldExp, Configuration configuration) { - super(source, List.of(field, chronoFieldExp), configuration); + public DateExtract(Source source, Expression chronoFieldExp, Expression field, Configuration configuration) { + super(source, List.of(chronoFieldExp, field), configuration); } @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - var fieldEvaluator = toEvaluator.apply(children().get(0)); - if (children().get(1).foldable()) { + var fieldEvaluator = toEvaluator.apply(children().get(1)); + if (children().get(0).foldable()) { ChronoField chrono = chronoField(); if (chrono == null) { - BytesRef field = (BytesRef) children().get(1).fold(); + BytesRef field = (BytesRef) children().get(0).fold(); throw new EsqlIllegalArgumentException("invalid date field for [{}]: {}", sourceText(), field.utf8ToString()); } return dvrCtx -> new DateExtractConstantEvaluator(fieldEvaluator.get(dvrCtx), 
chrono, configuration().zoneId(), dvrCtx); } - var chronoEvaluator = toEvaluator.apply(children().get(1)); + var chronoEvaluator = toEvaluator.apply(children().get(0)); return dvrCtx -> new DateExtractEvaluator( source(), fieldEvaluator.get(dvrCtx), @@ -65,7 +66,7 @@ public ExpressionEvaluator.Factory toEvaluator(Function newChildren) { @Override protected NodeInfo info() { - return NodeInfo.create(this, DateFormat::new, field, format, configuration()); + Expression first = format != null ? format : field; + Expression second = format != null ? field : null; + return NodeInfo.create(this, DateFormat::new, first, second, configuration()); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java index 98d75cbf672df..d1565091d320c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.ZoneId; -import java.util.Arrays; import java.util.List; import java.util.function.Function; @@ -41,10 +40,10 @@ public class DateParse extends ScalarFunction implements OptionalArgument, Evalu private final Expression field; private final Expression format; - public DateParse(Source source, Expression field, Expression format) { - super(source, format != null ? Arrays.asList(field, format) : Arrays.asList(field)); - this.field = field; - this.format = format; + public DateParse(Source source, Expression first, Expression second) { + super(source, second != null ? List.of(first, second) : List.of(first)); + this.field = second != null ? second : first; + this.format = second != null ? 
first : null; } @Override @@ -58,12 +57,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = isString(field, sourceText(), FIRST); + TypeResolution resolution = isString(field, sourceText(), format != null ? SECOND : FIRST); if (resolution.unresolved()) { return resolution; } if (format != null) { - resolution = isStringAndExact(format, sourceText(), SECOND); + resolution = isStringAndExact(format, sourceText(), FIRST); if (resolution.unresolved()) { return resolution; } @@ -126,7 +125,9 @@ public Expression replaceChildren(List newChildren) { @Override protected NodeInfo info() { - return NodeInfo.create(this, DateParse::new, field, format); + Expression first = format != null ? format : field; + Expression second = format != null ? field : null; + return NodeInfo.create(this, DateParse::new, first, second); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java index d7964e6c011fd..55885bf514fe2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.Duration; import java.time.Period; @@ -26,8 +25,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; -import static org.elasticsearch.common.logging.LoggerMessageFormat.format; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static 
org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate; @@ -45,7 +42,12 @@ protected TypeResolution resolveType() { return new TypeResolution("Unresolved children"); } - TypeResolution resolution = argumentTypesAreSwapped(); + TypeResolution resolution = argumentTypesAreSwapped( + left().dataType(), + right().dataType(), + EsqlDataTypes::isTemporalAmount, + sourceText() + ); if (resolution.unresolved()) { return resolution; } @@ -58,14 +60,6 @@ protected TypeResolution resolveType() { return isType(interval(), EsqlDataTypes::isTemporalAmount, sourceText(), SECOND, "dateperiod", "timeduration"); } - // TODO: drop check once 8.11 is released - private TypeResolution argumentTypesAreSwapped() { - if (DataTypes.isDateTime(left().dataType()) && isTemporalAmount(right().dataType())) { - return new TypeResolution(format(null, "function definition has been updated, please swap arguments in [{}]", sourceText())); - } - return TypeResolution.TYPE_RESOLVED; - } - @Override public Object fold() { return EvaluatorMapper.super.fold(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java index 7da2754bfd931..f59211ab42882 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java @@ -15,6 +15,8 @@ import org.elasticsearch.xpack.ql.tree.Source; import java.time.DateTimeException; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import static 
org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.ADD; @@ -88,4 +90,14 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function return asMillis(asDateTime(datetime).plus(temporalAmount)); } + + @Override + public Period fold(Period left, Period right) { + return left.plus(right); + } + + @Override + public Duration fold(Duration left, Duration right) { + return left.plus(right); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 3780e19a1dfd9..19552d4e873cd 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -9,21 +9,45 @@ import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.esql.EsqlClientException; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import java.util.function.Function; -import java.util.function.Predicate; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; +import static 
org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; abstract class DateTimeArithmeticOperation extends EsqlArithmeticOperation { + /** + * Custom exception to handle e.g. overflows when folding temporal values; we want to set the correct HTTP status (400). + */ + private static class IllegalTemporalValueException extends EsqlClientException { + protected IllegalTemporalValueException(String message, Object... args) { + super(message, args); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + public static IllegalTemporalValueException fromArithmeticException(Source source, ArithmeticException e) { + return new IllegalTemporalValueException("arithmetic exception in expression [{}]: [{}]", source.text(), e.getMessage()); + } + } + /** Arithmetic (quad) function. */ interface DatetimeArithmeticEvaluator { ExpressionEvaluator apply( @@ -55,31 +79,100 @@ ExpressionEvaluator apply( protected TypeResolution resolveType() { DataType leftType = left().dataType(); DataType rightType = right().dataType(); - // date math is only possible if one argument is a DATETIME and the other a (foldable) TemporalValue + + // Date math is only possible if either + // - one argument is a DATETIME and the other a (foldable) TemporalValue, or + // - both arguments are TemporalValues (so we can fold them). 
if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) { - if (argumentOfType(DataTypes::isDateTime) == null || argumentOfType(EsqlDataTypes::isTemporalAmount) == null) { - return new TypeResolution( - format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), leftType, rightType) - ); + if ((leftType == DataTypes.DATETIME && isTemporalAmount(rightType)) + || (rightType == DataTypes.DATETIME && isTemporalAmount(leftType))) { + return TypeResolution.TYPE_RESOLVED; + } + if (leftType == TIME_DURATION && rightType == TIME_DURATION) { + return TypeResolution.TYPE_RESOLVED; + } + if (leftType == DATE_PERIOD && rightType == DATE_PERIOD) { + return TypeResolution.TYPE_RESOLVED; } - return TypeResolution.TYPE_RESOLVED; + + return new TypeResolution( + format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), leftType, rightType) + ); } return super.resolveType(); } + /** + * Override this to allow processing literals of type {@link EsqlDataTypes#DATE_PERIOD} when folding constants. + * Used in {@link DateTimeArithmeticOperation#fold()}. + * @param left the left period + * @param right the right period + * @return the result of the evaluation + */ + abstract Period fold(Period left, Period right); + + /** + * Override this to allow processing literals of type {@link EsqlDataTypes#TIME_DURATION} when folding constants. + * Used in {@link DateTimeArithmeticOperation#fold()}. + * @param left the left duration + * @param right the right duration + * @return the result of the evaluation + */ + abstract Duration fold(Duration left, Duration right); + + @Override + public final Object fold() { + DataType leftDataType = left().dataType(); + DataType rightDataType = right().dataType(); + if (leftDataType == DATE_PERIOD && rightDataType == DATE_PERIOD) { + // Both left and right expressions are temporal amounts; we can assume they are both foldable. 
+ Period l = (Period) left().fold(); + Period r = (Period) right().fold(); + try { + return fold(l, r); + } catch (ArithmeticException e) { + // Folding will be triggered before the plan is sent to the compute service, so we have to handle arithmetic exceptions + // manually and provide a user-friendly error message. + throw IllegalTemporalValueException.fromArithmeticException(source(), e); + } + } + if (leftDataType == TIME_DURATION && rightDataType == TIME_DURATION) { + // Both left and right expressions are temporal amounts; we can assume they are both foldable. + Duration l = (Duration) left().fold(); + Duration r = (Duration) right().fold(); + try { + return fold(l, r); + } catch (ArithmeticException e) { + // Folding will be triggered before the plan is sent to the compute service, so we have to handle arithmetic exceptions + // manually and provide a user-friendly error message. + throw IllegalTemporalValueException.fromArithmeticException(source(), e); + } + } + return super.fold(); + } + @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - return dataType() == DataTypes.DATETIME - ? dvrCtx -> datetimes.apply( + if (dataType() == DataTypes.DATETIME) { + // One of the arguments has to be a datetime and the other a temporal amount. + Expression datetimeArgument; + Expression temporalAmountArgument; + if (left().dataType() == DataTypes.DATETIME) { + datetimeArgument = left(); + temporalAmountArgument = right(); + } else { + datetimeArgument = right(); + temporalAmountArgument = left(); + } + + return dvrCtx -> datetimes.apply( source(), - toEvaluator.apply(argumentOfType(DataTypes::isDateTime)).get(dvrCtx), - (TemporalAmount) argumentOfType(EsqlDataTypes::isTemporalAmount).fold(), + toEvaluator.apply(datetimeArgument).get(dvrCtx), + (TemporalAmount) temporalAmountArgument.fold(), dvrCtx - ) - : super.toEvaluator(toEvaluator); - } - - private Expression argumentOfType(Predicate filter) { - return filter.test(left().dataType()) ? 
left() : filter.test(right().dataType()) ? right() : null; + ); + } else { + return super.toEvaluator(toEvaluator); + } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java index 5a417134c96fc..d09ae25d91746 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java @@ -104,7 +104,7 @@ ExpressionEvaluator apply( } @Override - public final Object fold() { + public Object fold() { return EvaluatorMapper.super.fold(); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java index b00346b8cceb7..ba071c05a15a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java @@ -17,6 +17,8 @@ import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.DateTimeException; +import java.time.Duration; +import java.time.Period; import java.time.temporal.TemporalAmount; import static org.elasticsearch.common.logging.LoggerMessageFormat.format; @@ -44,7 +46,8 @@ public Sub(Source source, Expression left, Expression right) { @Override protected TypeResolution resolveType() { TypeResolution resolution = super.resolveType(); - if (resolution.resolved() && EsqlDataTypes.isDateTimeOrTemporal(dataType()) && DataTypes.isDateTime(left().dataType()) == false) { + // As opposed to 
general date time arithmetics, we cannot subtract a datetime from something else. + if (resolution.resolved() && EsqlDataTypes.isDateTimeOrTemporal(dataType()) && DataTypes.isDateTime(right().dataType())) { return new TypeResolution( format( null, @@ -100,4 +103,14 @@ static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function return asMillis(asDateTime(datetime).minus(temporalAmount)); } + + @Override + public Period fold(Period left, Period right) { + return left.minus(right); + } + + @Override + public Duration fold(Duration left, Duration right) { + return left.minus(right); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index e837858153cc1..0f4d194d8016c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -230,7 +230,7 @@ public static boolean canPushToSource(Expression exp) { private static boolean isAttributePushable(Expression expression, ScalarFunction operation) { if (expression instanceof FieldAttribute f && f.getExactInfo().hasExact()) { - return true; + return isAggregatable(f); } if (expression instanceof MetadataAttribute ma && ma.searchable()) { return operation == null @@ -243,6 +243,17 @@ private static boolean isAttributePushable(Expression expression, ScalarFunction } } + /** + * this method is supposed to be used to define if a field can be used for exact push down (eg. sort or filter). + * "aggregatable" is the most accurate information we can have from field_caps as of now. 
+ * Pushing down operations on fields that are not aggregatable would result in an error. + * @param f + * @return + */ + private static boolean isAggregatable(FieldAttribute f) { + return f.exactAttribute().field().isAggregatable(); + } + private static class PushLimitToSource extends OptimizerRule { @Override protected PhysicalPlan rule(LimitExec limitExec) { @@ -280,7 +291,8 @@ protected PhysicalPlan rule(TopNExec topNExec) { private boolean canPushDownOrders(List orders) { // allow only exact FieldAttributes (no expressions) for sorting - return orders.stream().allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact()); + return orders.stream() + .allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact() && isAggregatable(fa)); } private List buildFieldSorts(List orders) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index 2ab3feef9fa51..3eec19961d597 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -39,10 +39,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index ff6e7f4aa2736..83dd0ff4ed1c6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -15,6 +15,8 @@ import java.util.Collection; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; @@ -63,6 +65,12 @@ public DataType commonType(DataType left, DataType right) { if (isDateTime(left) && isTemporalAmount(right) || isTemporalAmount(left) && isDateTime(right)) { return DataTypes.DATETIME; } + if (left == TIME_DURATION && right == TIME_DURATION) { + return TIME_DURATION; + } + if (left == DATE_PERIOD && right == DATE_PERIOD) { + return DATE_PERIOD; + } return DataTypeConverter.commonType(left, right); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 6e480749efb21..06050e41f73de 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -213,13 +213,13 @@ public void testProjectStar() { assertProjection(""" from test | keep * - """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); } public void testNoProjection() { assertProjection(""" from test - """, "_meta_field", "emp_no", "first_name", "gender", "job", 
"job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); assertProjectionTypes( """ from test @@ -232,6 +232,7 @@ public void testNoProjection() { DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.KEYWORD, + DataTypes.LONG, DataTypes.INTEGER ); } @@ -240,7 +241,7 @@ public void testProjectOrder() { assertProjection(""" from test | keep first_name, *, last_name - """, "first_name", "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary", "last_name"); + """, "first_name", "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary", "last_name"); } public void testProjectThenDropName() { @@ -272,21 +273,21 @@ public void testProjectDropPattern() { from test | keep * | drop *_name - """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary"); } public void testProjectDropNoStarPattern() { assertProjection(""" from test | drop *_name - """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "long_noidx", "salary"); } public void testProjectOrderPatternWithRest() { assertProjection(""" from test | keep *name, *, emp_no - """, "first_name", "last_name", "_meta_field", "gender", "job", "job.raw", "languages", "salary", "emp_no"); + """, "first_name", "last_name", "_meta_field", "gender", "job", "job.raw", "languages", "long_noidx", "salary", "emp_no"); } public void testProjectDropPatternAndKeepOthers() { @@ -423,7 +424,7 @@ public void testDropPatternUnsupportedFields() { assertProjection(""" from test | drop *ala* - """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", 
"languages", "last_name", "long_noidx"); } public void testDropUnsupportedPattern() { @@ -491,7 +492,7 @@ public void testRenameReuseAlias() { assertProjection(""" from test | rename emp_no as e, first_name as e - """, "_meta_field", "e", "gender", "job", "job.raw", "languages", "last_name", "salary"); + """, "_meta_field", "e", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary"); } public void testRenameUnsupportedField() { @@ -916,36 +917,36 @@ public void testDateFormatOnText() { public void testDateFormatWithNumericFormat() { verifyUnsupported(""" from test - | eval date_format(date, 1) - """, "second argument of [date_format(date, 1)] must be [string], found value [1] type [integer]"); + | eval date_format(1, date) + """, "first argument of [date_format(1, date)] must be [string], found value [1] type [integer]"); } public void testDateFormatWithDateFormat() { verifyUnsupported(""" from test | eval date_format(date, date) - """, "second argument of [date_format(date, date)] must be [string], found value [date] type [datetime]"); + """, "first argument of [date_format(date, date)] must be [string], found value [date] type [datetime]"); } public void testDateParseOnInt() { verifyUnsupported(""" from test - | eval date_parse(int, keyword) - """, "first argument of [date_parse(int, keyword)] must be [string], found value [int] type [integer]"); + | eval date_parse(keyword, int) + """, "second argument of [date_parse(keyword, int)] must be [string], found value [int] type [integer]"); } public void testDateParseOnDate() { verifyUnsupported(""" from test - | eval date_parse(date, keyword) - """, "first argument of [date_parse(date, keyword)] must be [string], found value [date] type [datetime]"); + | eval date_parse(keyword, date) + """, "second argument of [date_parse(keyword, date)] must be [string], found value [date] type [datetime]"); } public void testDateParseOnIntPattern() { verifyUnsupported(""" from test - | eval 
date_parse(keyword, int) - """, "second argument of [date_parse(keyword, int)] must be [string], found value [int] type [integer]"); + | eval date_parse(int, keyword) + """, "first argument of [date_parse(int, keyword)] must be [string], found value [int] type [integer]"); } public void testDateTruncOnInt() { @@ -976,6 +977,20 @@ public void testDateTruncWithNumericInterval() { """, "second argument of [date_trunc(1, date)] must be [dateperiod or timeduration], found value [1] type [integer]"); } + public void testDateExtractWithSwappedArguments() { + verifyUnsupported(""" + from test + | eval date_extract(date, "year") + """, "function definition has been updated, please swap arguments in [date_extract(date, \"year\")]"); + } + + public void testDateFormatWithSwappedArguments() { + verifyUnsupported(""" + from test + | eval date_format(date, "yyyy-MM-dd") + """, "function definition has been updated, please swap arguments in [date_format(date, \"yyyy-MM-dd\")]"); + } + public void testDateTruncWithSwappedArguments() { verifyUnsupported(""" from test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 8740b04298c23..3c1a9800d6d11 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -269,6 +269,21 @@ public void testSubtractDateTimeFromTemporal() { } } + public void testPeriodAndDurationInEval() { + for (var unit : List.of("millisecond", "second", "minute", "hour")) { + assertEquals( + "1:18: EVAL does not support type [time_duration] in expression [1 " + unit + "]", + error("row x = 1 | eval y = 1 " + unit) + ); + } + for (var unit : List.of("day", "week", "month", "year")) { + assertEquals( + "1:18: EVAL does not support type [date_period] in expression [1 " + unit + "]", 
+ error("row x = 1 | eval y = 1 " + unit) + ); + } + } + private String error(String query) { return error(query, defaultAnalyzer); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 017034eba9c64..d7c962ae15a20 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -87,14 +87,14 @@ public static Literal randomLiteral(DataType type) { case "short" -> randomShort(); case "integer" -> randomInt(); case "unsigned_long", "long" -> randomLong(); - case "date_period" -> Period.ofDays(randomInt(10)); + case "date_period" -> Period.of(randomIntBetween(-1000, 1000), randomIntBetween(-13, 13), randomIntBetween(-32, 32)); case "datetime" -> randomMillisUpToYear9999(); case "double", "scaled_float" -> randomDouble(); case "float" -> randomFloat(); case "half_float" -> HalfFloatPoint.sortableShortToHalfFloat(HalfFloatPoint.halfFloatToSortableShort(randomFloat())); case "keyword" -> new BytesRef(randomAlphaOfLength(5)); case "ip" -> new BytesRef(InetAddressPoint.encode(randomIp(randomBoolean()))); - case "time_duration" -> Duration.ofMillis(randomNonNegativeLong()); + case "time_duration" -> Duration.ofNanos(randomLongBetween(-604800000000000L, 604800000000000L)); case "text" -> new BytesRef(randomAlphaOfLength(50)); case "version" -> new Version(randomIdentifier()).toBytesRef(); case "null" -> null; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index a87e7c5eb5bb1..96c35905e3dc0 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -39,10 +39,10 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Date Extract Year", () -> { return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(1687944333000L, DataTypes.DATETIME, "date"), - new TestCaseSupplier.TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field") + new TestCaseSupplier.TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field"), + new TestCaseSupplier.TypedData(1687944333000L, DataTypes.DATETIME, "date") ), - "DateExtractEvaluator[value=Attribute[channel=0], chronoField=Attribute[channel=1], zone=Z]", + "DateExtractEvaluator[value=Attribute[channel=1], chronoField=Attribute[channel=0], zone=Z]", DataTypes.LONG, equalTo(2023L) ); @@ -55,8 +55,8 @@ public void testAllChronoFields() { for (ChronoField value : ChronoField.values()) { DateExtract instance = new DateExtract( Source.EMPTY, - new Literal(Source.EMPTY, epochMilli, DataTypes.DATETIME), new Literal(Source.EMPTY, new BytesRef(value.name()), DataTypes.KEYWORD), + new Literal(Source.EMPTY, epochMilli, DataTypes.DATETIME), EsqlTestUtils.TEST_CFG ); @@ -75,7 +75,7 @@ protected Expression build(Source source, List args) { @Override protected List argSpec() { - return List.of(required(DataTypes.DATETIME), required(strings())); + return List.of(required(strings()), required(DataTypes.DATETIME)); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index 48e70e929f8e1..115892640f2b1 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java @@ -33,10 +33,10 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Basic Case", () -> { return new TestCaseSupplier.TestCase( List.of( - new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first"), - new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second") + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second"), + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first") ), - "DateParseEvaluator[val=Attribute[channel=0], formatter=Attribute[channel=1], zoneId=Z]", + "DateParseEvaluator[val=Attribute[channel=1], formatter=Attribute[channel=0], zoneId=Z]", DataTypes.DATETIME, equalTo(1683244800000L) ); @@ -50,7 +50,7 @@ protected Expression build(Source source, List args) { @Override protected List argSpec() { - return List.of(required(strings()), optional(strings())); + return List.of(optional(strings()), required(strings())); } @Override diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 805eb3fd557c9..454c8d2ae5a6e 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -115,6 +115,18 @@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(rhs).plus(lhs))) ); + }), new TestCaseSupplier("Period + Period", () -> { + Period lhs = 
(Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.DATE_PERIOD, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.DATE_PERIOD, + equalTo(lhs.plus(rhs)) + ); }), new TestCaseSupplier("Datetime + Duration", () -> { long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); @@ -139,6 +151,18 @@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(lhs).plus(rhs))) ); + }), new TestCaseSupplier("Duration + Duration", () -> { + Duration lhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.TIME_DURATION, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.TIME_DURATION, + equalTo(lhs.plus(rhs)) + ); }))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index 11496154f0809..4a9056b0de594 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -103,6 +103,18 @@ public static Iterable parameters() { DataTypes.DATETIME, equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); + }), new 
TestCaseSupplier("Period - Period", () -> { + Period lhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.DATE_PERIOD, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.DATE_PERIOD, + equalTo(lhs.minus(rhs)) + ); }), new TestCaseSupplier("Datetime - Duration", () -> { long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); @@ -116,6 +128,18 @@ public static Iterable parameters() { equalTo(asMillis(asDateTime(lhs).minus(rhs))) ); return testCase; + }), new TestCaseSupplier("Duration - Duration", () -> { + Duration lhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.TIME_DURATION, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "Only folding possible, so there's no evaluator", + EsqlDataTypes.TIME_DURATION, + equalTo(lhs.minus(rhs)) + ); }))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index f242c1e082829..7a82ab32b7ef3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -246,7 +246,19 @@ public void testMissingFieldInFilterNoProjection() { var local = as(localPlan, LocalRelation.class); 
assertThat( Expressions.names(local.output()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary", "x") + contains( + "_meta_field", + "emp_no", + "first_name", + "gender", + "job", + "job.raw", + "languages", + "last_name", + "long_noidx", + "salary", + "x" + ) ); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 746a34eaedce4..79add5bc08e6b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -390,7 +390,7 @@ public void testExtractorMultiEvalWithDifferentNames() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") ); } @@ -420,7 +420,7 @@ public void testExtractorMultiEvalWithSameName() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "long_noidx", "salary") ); } @@ -877,7 +877,7 @@ public void testPushLimitAndFilterToSource() { assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", 
"last_name", "long_noidx", "salary") ); var source = source(extract.child()); @@ -1683,6 +1683,24 @@ public void testNoTextFilterPushDown() { assertNull(source.query()); } + public void testNoNonIndexedFilterPushDown() { + var plan = physicalPlan(""" + from test + | where long_noidx == 1 + """); + + var optimized = optimizedPlan(plan); + var limit = as(optimized, LimitExec.class); + var exchange = asRemoteExchange(limit.child()); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var limit2 = as(extract.child(), LimitExec.class); + var filter = as(limit2.child(), FilterExec.class); + var extract2 = as(filter.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.query()); + } + public void testTextWithRawFilterPushDown() { var plan = physicalPlan(""" from test @@ -1716,6 +1734,23 @@ public void testNoTextSortPushDown() { assertNull(source.sorts()); } + public void testNoNonIndexedSortPushDown() { + var plan = physicalPlan(""" + from test + | sort long_noidx + """); + + var optimized = optimizedPlan(plan); + var topN = as(optimized, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var topN2 = as(extract.child(), TopNExec.class); + var extract2 = as(topN2.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.sorts()); + } + public void testTextWithRawSortPushDown() { var plan = physicalPlan(""" from test diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 7956892c34645..efe8e773bfdaa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -103,8 +103,8 @@ public static List params() { DOUBLE1, literal, new Length(Source.EMPTY, literal), - new DateFormat(Source.EMPTY, DATE, datePattern, TEST_CONFIG), - new DateFormat(Source.EMPTY, literal, datePattern, TEST_CONFIG), + new DateFormat(Source.EMPTY, datePattern, DATE, TEST_CONFIG), + new DateFormat(Source.EMPTY, datePattern, literal, TEST_CONFIG), new StartsWith(Source.EMPTY, literal, literal), new Substring(Source.EMPTY, literal, LONG, LONG), new DateTrunc(Source.EMPTY, dateInterval, DATE) }) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java index bbd6906221aa5..7e3b9117b6410 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -236,8 +236,8 @@ public void testEvalDateParseDynamic() { from employees | where emp_no == 10039 or emp_no == 10040 | sort emp_no - | eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") - | eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") + | eval birth_date_string = date_format("yyyy-MM-dd", birth_date) + | eval new_date = date_parse("yyyy-MM-dd", birth_date_string) | eval bool = new_date == birth_date | keep emp_no, new_date, birth_date, bool""", Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); } @@ -246,7 +246,7 @@ public void testDateFields() { assertFieldNames(""" from employees | where emp_no == 10049 or emp_no == 10050 - | eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year") + | eval year = date_extract("year", birth_date), month = date_extract("month_of_year", birth_date) | keep emp_no, year, month""", 
Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); } @@ -793,7 +793,7 @@ public void testByStringAndLongWithAlias() { public void testByStringAndString() { assertFieldNames(""" from employees - | eval hire_year_str = date_format(hire_date, "yyyy") + | eval hire_year_str = date_format("yyyy", hire_date) | stats c = count(gender) by gender, hire_year_str | sort c desc, gender, hire_year_str | where c >= 5""", Set.of("hire_date", "hire_date.*", "gender", "gender.*")); @@ -822,7 +822,7 @@ public void testCountDistinctOfKeywords() { assertFieldNames( """ from employees - | eval hire_year_str = date_format(hire_date, "yyyy") + | eval hire_year_str = date_format("yyyy", hire_date) | stats g = count_distinct(gender), h = count_distinct(hire_year_str)""", Set.of("hire_date", "hire_date.*", "gender", "gender.*") ); diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java index 009e17bd79e6b..1a088f9aa1de7 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/Fleet.java @@ -43,8 +43,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java index d5cea8ac5caa5..3261ac9d9bdb5 100644 --- 
a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/IdentityProviderPlugin.java @@ -32,8 +32,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackPlugin; diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java index b084826bc01ca..18bf02fdfd8a1 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/UpdateSettingsStepTests.java @@ -26,9 +26,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ilm.Step.StepKey; diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index cf3021612abd8..6cd807db27566 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ 
b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -38,8 +38,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java index 1fee5385cea2a..aa3f6e6634678 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/InferencePlugin.java @@ -33,8 +33,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.ClientHelper; diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java index 7c542e8acd22b..602048f2e3e76 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeService.java @@ -44,12 +44,19 @@ public static 
ElserMlNodeModel parseConfig( Map settings ) { Map serviceSettingsMap = removeFromMapOrThrowIfNull(settings, Model.SERVICE_SETTINGS); - Map taskSettingsMap = removeFromMapOrThrowIfNull(settings, Model.TASK_SETTINGS); - var serviceSettings = serviceSettingsFromMap(serviceSettingsMap); + + Map taskSettingsMap; + // task settings are optional + if (settings.containsKey(Model.TASK_SETTINGS)) { + taskSettingsMap = removeFromMapOrThrowIfNull(settings, Model.TASK_SETTINGS); + } else { + taskSettingsMap = Map.of(); + } + var taskSettings = taskSettingsFromMap(taskType, taskSettingsMap); - if (throwOnUnknownFields == false) { + if (throwOnUnknownFields) { throwIfNotEmptyMap(settings); throwIfNotEmptyMap(serviceSettingsMap); throwIfNotEmptyMap(taskSettingsMap); @@ -133,8 +140,6 @@ private static ElserMlNodeTaskSettings taskSettingsFromMap(TaskType taskType, Ma } // no config options yet - throwIfNotEmptyMap(config); - return ElserMlNodeTaskSettings.DEFAULT; } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java index 008e6a8c17653..bdbb4c545900c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/services/elser/ElserMlNodeServiceTests.java @@ -7,10 +7,18 @@ package org.elasticsearch.xpack.inference.services.elser; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.inference.Model; import org.elasticsearch.xpack.inference.TaskType; +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.containsString; +import static org.mockito.Mockito.mock; + public class 
ElserMlNodeServiceTests extends ESTestCase { public static Model randomModelConfig(String modelId, TaskType taskType) { @@ -25,4 +33,124 @@ public static Model randomModelConfig(String modelId, TaskType taskType) { default -> throw new IllegalArgumentException("task type " + taskType + " is not supported"); }; } + + public void testParseConfigStrict() { + var service = new ElserMlNodeService(mock(Client.class)); + + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of()); + + ElserMlNodeModel parsedModel = service.parseConfigStrict("foo", TaskType.SPARSE_EMBEDDING, settings); + + assertEquals( + new ElserMlNodeModel( + "foo", + TaskType.SPARSE_EMBEDDING, + ElserMlNodeService.NAME, + new ElserMlNodeServiceSettings(1, 4), + ElserMlNodeTaskSettings.DEFAULT + ), + parsedModel + ); + } + + public void testParseConfigStrictWithNoTaskSettings() { + var service = new ElserMlNodeService(mock(Client.class)); + + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + + ElserMlNodeModel parsedModel = service.parseConfigStrict("foo", TaskType.SPARSE_EMBEDDING, settings); + + assertEquals( + new ElserMlNodeModel( + "foo", + TaskType.SPARSE_EMBEDDING, + ElserMlNodeService.NAME, + new ElserMlNodeServiceSettings(1, 4), + ElserMlNodeTaskSettings.DEFAULT + ), + parsedModel + ); + } + + public void testParseConfigStrictWithUnknownSettings() { + + for (boolean throwOnUnknown : new boolean[] { true, false }) { + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of()); + settings.put("foo", 
"bar"); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>(Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4)) + ); + settings.put(Model.TASK_SETTINGS, Map.of("foo", "bar")); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + + { + var settings = new HashMap(); + settings.put( + Model.SERVICE_SETTINGS, + new HashMap<>( + Map.of(ElserMlNodeServiceSettings.NUM_ALLOCATIONS, 1, ElserMlNodeServiceSettings.NUM_THREADS, 4, "foo", "bar") + ) + ); + + if (throwOnUnknown) { + var e = expectThrows( + ElasticsearchStatusException.class, + () -> ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings) + ); + assertThat( + e.getMessage(), + containsString("Model configuration contains settings [{foo=bar}] unknown to the [elser_mlnode] service") + ); + } else { + var parsed = ElserMlNodeService.parseConfig(throwOnUnknown, "foo", TaskType.SPARSE_EMBEDDING, settings); + } + } + } + } } diff --git 
a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java index b74cb64f708c1..8f5f1262c4d82 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/SourceValueFetcherSortedUnsignedLongIndexFieldData.java @@ -128,7 +128,7 @@ public boolean advanceExact(int doc) throws IOException { values.sort(Long::compare); iterator = values.iterator(); - return true; + return values.isEmpty() == false; } @Override diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java index 46ba695624f60..2afeda1f13512 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/MachineLearningPackageLoader.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.plugins.ActionPlugin; @@ -88,6 +89,11 @@ public BootstrapCheckResult check(BootstrapContext context) { public boolean alwaysEnforce() { return true; } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECKS; + } }); } diff --git 
a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java index 113cb2c092376..5a6eac0cc3b76 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelImporter.java @@ -40,7 +40,7 @@ * A helper class for abstracting out the use of the ModelLoaderUtils to make dependency injection testing easier. */ class ModelImporter { - private static final int DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024; // 4MB + private static final int DEFAULT_CHUNK_SIZE = 1024 * 1024; // 1MB private static final Logger logger = LogManager.getLogger(ModelImporter.class); private final Client client; private final String modelId; diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java index 5059ec50708e2..2e1a1829c0244 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlAutoUpdateServiceIT.java @@ -6,7 +6,6 @@ */ package org.elasticsearch.xpack.ml.integration; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import 
org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.xcontent.XContentType; @@ -111,7 +111,7 @@ public void testAutomaticModelUpdate() throws Exception { .masterNodeId("node_id") .build() ) - .putTransportVersion("node_id", TransportVersion.current()) + .putCompatibilityVersions("node_id", CompatibilityVersionsUtils.staticCurrent()) .build(), ClusterState.builder(new ClusterName("test")).build() ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 59f3f41371b53..9b8009e234911 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -77,10 +77,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 9b79754a5afe9..d37edcd85946a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -27,6 +26,7 @@ import org.elasticsearch.cluster.node.VersionInformation; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -124,7 +124,7 @@ public void testUpdateModelRoutingTable() { .add(buildNode(startedNode, true, ByteSizeValue.ofGb(4).getBytes(), 8)) .build() ) - .putTransportVersion(nodeId, TransportVersion.current()) + .putCompatibilityVersions(nodeId, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -238,7 +238,7 @@ public void testRemoveAssignment() { ClusterState clusterStateWithAssignment = ClusterState.builder(new ClusterName("testRemoveAssignment")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -270,7 +270,7 @@ public void testRemoveAllAssignments() { ClusterState clusterStateWithAssignments = ClusterState.builder(new ClusterName("testRemoveAllAssignments")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, 
ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) @@ -367,7 +367,7 @@ public void testCreateAssignmentWhileResetModeIsTrue() throws InterruptedExcepti ClusterState currentState = ClusterState.builder(new ClusterName("testCreateAssignment")) .nodes(discoveryNodes) - .putTransportVersion("ml-node-with-room", TransportVersion.current()) + .putCompatibilityVersions("ml-node-with-room", CompatibilityVersionsUtils.staticCurrent()) .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build())) .build(); when(clusterService.state()).thenReturn(currentState); @@ -1768,7 +1768,7 @@ private static ClusterState createClusterState(List nodeIds, Metadata me .toArray(DiscoveryNode[]::new); ClusterState.Builder csBuilder = csBuilderWithNodes("test", nodes); - nodeIds.forEach(id -> csBuilder.putTransportVersion(id, TransportVersion.current())); + nodeIds.forEach(id -> csBuilder.putCompatibilityVersions(id, CompatibilityVersionsUtils.staticCurrent())); return csBuilder.metadata(metadata).build(); } @@ -1810,7 +1810,7 @@ public void testSetAllocationToStopping() { ClusterState clusterStateWithAllocation = ClusterState.builder(new ClusterName("testSetAllocationToStopping")) .nodes(DiscoveryNodes.builder().add(buildNode("test-node", true, ByteSizeValue.ofGb(4).getBytes(), 8)).build()) - .putTransportVersion("test-node", TransportVersion.current()) + .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index b0903cf47dc88..0bd2e716758e4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ml.inference.assignment; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -21,6 +20,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; @@ -569,7 +569,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -627,7 +627,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new ClusterName("testClusterChanged")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -680,7 +680,7 @@ public void testClusterChanged() throws Exception { "testClusterChanged", ClusterState.builder(new 
ClusterName("testClusterChanged")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( @@ -721,7 +721,7 @@ public void testClusterChanged_GivenAllStartedAssignments_AndNonMatchingTargetAl "shouldUpdateAllocations", ClusterState.builder(new ClusterName("shouldUpdateAllocations")) .nodes(nodes) - .putTransportVersion(NODE_ID, TransportVersion.current()) + .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() .putCustom( diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java index de02debef27a4..2189d759842f4 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/Monitoring.java @@ -34,8 +34,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackPlugin; diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java index 915935a2d6b24..43d7bbdec4178 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java +++ 
b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java @@ -57,8 +57,8 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotRestoreException; import org.elasticsearch.snapshots.sourceonly.SourceOnlySnapshotRepository; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackPlugin; diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 599524915562c..c8ee6d91a1e47 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -10,11 +10,6 @@ import java.util.List; public class GetStackTracesActionIT extends ProfilingTestCase { - @Override - protected boolean useOnlyAllEvents() { - return randomBoolean(); - } - public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1, null); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java new file mode 100644 index 0000000000000..7cd5b08ee773f --- /dev/null +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStatusActionIT.java @@ -0,0 +1,51 @@ 
+/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.rest.RestStatus; + +public class GetStatusActionIT extends ProfilingTestCase { + @Override + protected boolean requiresDataSetup() { + // We need explicit control whether index template management is enabled, and thus we skip data setup. + return false; + } + + public void testTimeoutIfResourcesNotCreated() throws Exception { + updateProfilingTemplatesEnabled(false); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(true); + // shorter than the default timeout to avoid excessively long execution + request.timeout(TimeValue.timeValueSeconds(15)); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.REQUEST_TIMEOUT, response.status()); + assertFalse(response.isResourcesCreated()); + } + + public void testNoTimeoutIfNotWaiting() throws Exception { + updateProfilingTemplatesEnabled(false); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(false); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.OK, response.status()); + assertFalse(response.isResourcesCreated()); + } + + public void testWaitsUntilResourcesAreCreated() throws Exception { + updateProfilingTemplatesEnabled(true); + GetStatusAction.Request request = new GetStatusAction.Request(); + request.waitForResourcesCreated(true); + + GetStatusAction.Response response = client().execute(GetStatusAction.INSTANCE, request).get(); + assertEquals(RestStatus.OK, response.status()); + 
assertTrue(response.isResourcesCreated()); + } +} diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 240f05f6b4335..f15925b7c891b 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -84,7 +84,16 @@ private void indexDoc(String index, String id, Map source) { * * @return true iff this test should rely on only "profiling-events-all" being present. */ - protected abstract boolean useOnlyAllEvents(); + protected boolean useOnlyAllEvents() { + return randomBoolean(); + } + + /** + * @return true iff this test relies that data (and the corresponding indices / data streams) are present for this test. + */ + protected boolean requiresDataSetup() { + return true; + } protected void waitForIndices() throws Exception { assertBusy(() -> { @@ -110,6 +119,9 @@ protected void updateProfilingTemplatesEnabled(boolean newValue) { @Before public void setupData() throws Exception { + if (requiresDataSetup() == false) { + return; + } // only enable index management while setting up indices to avoid interfering with the rest of the test infrastructure updateProfilingTemplatesEnabled(true); Collection eventsIndices = useOnlyAllEvents() ? List.of(EventsIndex.FULL_INDEX.getName()) : EventsIndex.indexNames(); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Frame.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Frame.java new file mode 100644 index 0000000000000..42d830ed00477 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Frame.java @@ -0,0 +1,10 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +public record Frame(String fileName, String functionName, int functionOffset, int lineNumber, boolean inline) {} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java new file mode 100644 index 0000000000000..79f8632238d4c --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.action.ActionType; + +public final class GetFlamegraphAction extends ActionType { + public static final GetFlamegraphAction INSTANCE = new GetFlamegraphAction(); + public static final String NAME = "indices:data/read/profiling/flamegraph"; + + private GetFlamegraphAction() { + super(NAME, GetFlamegraphResponse::new); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java new file mode 100644 index 0000000000000..0ab9060aa8936 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -0,0 +1,153 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.common.xcontent.ChunkedToXContentObject; +import org.elasticsearch.xcontent.ToXContent; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXContentObject { + private final int size; + private final double samplingRate; + private final List> edges; + private final List fileIds; + private final List frameTypes; + private final List inlineFrames; + private final List fileNames; + private final List addressOrLines; + private final List functionNames; + private final List functionOffsets; + private final List sourceFileNames; + private final List sourceLines; + private final List countInclusive; + private final List countExclusive; + + public GetFlamegraphResponse(StreamInput in) throws IOException { + this.size = in.readInt(); + this.samplingRate = in.readDouble(); + this.edges = in.readCollectionAsList(i -> i.readMap(StreamInput::readInt)); + this.fileIds = in.readCollectionAsList(StreamInput::readString); + this.frameTypes = in.readCollectionAsList(StreamInput::readInt); + this.inlineFrames = in.readCollectionAsList(StreamInput::readBoolean); + this.fileNames = in.readCollectionAsList(StreamInput::readString); + this.addressOrLines = in.readCollectionAsList(StreamInput::readInt); + this.functionNames = in.readCollectionAsList(StreamInput::readString); + this.functionOffsets = in.readCollectionAsList(StreamInput::readInt); + this.sourceFileNames = 
in.readCollectionAsList(StreamInput::readString); + this.sourceLines = in.readCollectionAsList(StreamInput::readInt); + this.countInclusive = in.readCollectionAsList(StreamInput::readInt); + this.countExclusive = in.readCollectionAsList(StreamInput::readInt); + } + + public GetFlamegraphResponse( + int size, + double samplingRate, + List> edges, + List fileIds, + List frameTypes, + List inlineFrames, + List fileNames, + List addressOrLines, + List functionNames, + List functionOffsets, + List sourceFileNames, + List sourceLines, + List countInclusive, + List countExclusive + ) { + this.size = size; + this.samplingRate = samplingRate; + this.edges = edges; + this.fileIds = fileIds; + this.frameTypes = frameTypes; + this.inlineFrames = inlineFrames; + this.fileNames = fileNames; + this.addressOrLines = addressOrLines; + this.functionNames = functionNames; + this.functionOffsets = functionOffsets; + this.sourceFileNames = sourceFileNames; + this.sourceLines = sourceLines; + this.countInclusive = countInclusive; + this.countExclusive = countExclusive; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(this.size); + out.writeDouble(this.samplingRate); + out.writeCollection(this.edges, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeInt)); + out.writeCollection(this.fileIds, StreamOutput::writeString); + out.writeCollection(this.frameTypes, StreamOutput::writeInt); + out.writeCollection(this.inlineFrames, StreamOutput::writeBoolean); + out.writeCollection(this.fileNames, StreamOutput::writeString); + out.writeCollection(this.addressOrLines, StreamOutput::writeInt); + out.writeCollection(this.functionNames, StreamOutput::writeString); + out.writeCollection(this.functionOffsets, StreamOutput::writeInt); + out.writeCollection(this.sourceFileNames, StreamOutput::writeString); + out.writeCollection(this.sourceLines, StreamOutput::writeInt); + out.writeCollection(this.countInclusive, StreamOutput::writeInt); + 
out.writeCollection(this.countExclusive, StreamOutput::writeInt); + } + + public int getSize() { + return size; + } + + public double getSamplingRate() { + return samplingRate; + } + + public List getCountInclusive() { + return countInclusive; + } + + public List getCountExclusive() { + return countExclusive; + } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + return Iterators.concat( + ChunkedToXContentHelper.startObject(), + ChunkedToXContentHelper.array( + "Edges", + Iterators.flatMap( + edges.iterator(), + perNodeEdges -> Iterators.concat( + ChunkedToXContentHelper.startArray(), + Iterators.map(perNodeEdges.entrySet().iterator(), edge -> (b, p) -> b.value(edge.getValue())), + ChunkedToXContentHelper.endArray() + ) + ) + ), + ChunkedToXContentHelper.array("FileID", Iterators.map(fileIds.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("FrameType", Iterators.map(frameTypes.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("Inline", Iterators.map(inlineFrames.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("ExeFilename", Iterators.map(fileNames.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("AddressOrLine", Iterators.map(addressOrLines.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("FunctionName", Iterators.map(functionNames.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("FunctionOffset", Iterators.map(functionOffsets.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("SourceFilename", Iterators.map(sourceFileNames.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("SourceLine", Iterators.map(sourceLines.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("CountInclusive", Iterators.map(countInclusive.iterator(), e -> (b, p) -> b.value(e))), + ChunkedToXContentHelper.array("CountExclusive", 
Iterators.map(countExclusive.iterator(), e -> (b, p) -> b.value(e))), + Iterators.single((b, p) -> b.field("Size", size)), + Iterators.single((b, p) -> b.field("SamplingRate", samplingRate)), + ChunkedToXContentHelper.endObject() + ); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index 244d9cd3fd830..4083776f8c4a6 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -41,6 +41,11 @@ public class GetStackTracesRequest extends ActionRequest implements IndicesReque private Integer sampleSize; + // We intentionally don't expose this field via the REST API but we can control behavior within Elasticsearch. + // Once we have migrated all client-side code to dedicated APIs (such as the flamegraph API), we can adjust + // sample counts by default and remove this flag. 
+ private Boolean adjustSampleCount; + public GetStackTracesRequest() { this(null, null); } @@ -53,12 +58,14 @@ public GetStackTracesRequest(Integer sampleSize, QueryBuilder query) { public GetStackTracesRequest(StreamInput in) throws IOException { this.query = in.readOptionalNamedWriteable(QueryBuilder.class); this.sampleSize = in.readOptionalInt(); + this.adjustSampleCount = in.readOptionalBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(query); out.writeOptionalInt(sampleSize); + out.writeOptionalBoolean(adjustSampleCount); } public Integer getSampleSize() { @@ -69,6 +76,14 @@ public QueryBuilder getQuery() { return query; } + public boolean isAdjustSampleCount() { + return Boolean.TRUE.equals(adjustSampleCount); + } + + public void setAdjustSampleCount(Boolean adjustSampleCount) { + this.adjustSampleCount = adjustSampleCount; + } + public void parseXContent(XContentParser parser) throws IOException { XContentParser.Token token = parser.currentToken(); String currentFieldName = null; diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index 7a6fba1f04c84..72fed7376bde5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -137,6 +137,10 @@ public int getTotalFrames() { return totalFrames; } + public double getSamplingRate() { + return samplingRate; + } + @Override public Iterator toXContentChunked(ToXContent.Params params) { return Iterators.concat( @@ -147,6 +151,7 @@ public Iterator toXContentChunked(ToXContent.Params params optional("stack_trace_events", stackTraceEvents, ChunkedToXContentHelper::map), Iterators.single((b, p) -> b.field("total_frames", totalFrames)), 
Iterators.single((b, p) -> b.field("sampling_rate", samplingRate)), + // start and end are intentionally not written to the XContent representation because we only need them on the transport layer ChunkedToXContentHelper.endObject() ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java index 31540cffef010..8566978decaa8 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStatusAction.java @@ -14,7 +14,8 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.StatusToXContentObject; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; @@ -28,12 +29,13 @@ protected GetStatusAction() { super(NAME, GetStatusAction.Response::new); } - public static class Response extends ActionResponse implements ToXContentObject { + public static class Response extends ActionResponse implements StatusToXContentObject { private boolean profilingEnabled; private boolean resourceManagementEnabled; private boolean resourcesCreated; private boolean pre891Data; + private boolean timedOut; public Response(StreamInput in) throws IOException { super(in); @@ -41,6 +43,7 @@ public Response(StreamInput in) throws IOException { resourceManagementEnabled = in.readBoolean(); resourcesCreated = in.readBoolean(); pre891Data = in.readBoolean(); + timedOut = in.readBoolean(); } public Response(boolean profilingEnabled, boolean resourceManagementEnabled, boolean resourcesCreated, boolean pre891Data) { @@ -50,6 +53,14 @@ public Response(boolean profilingEnabled, 
boolean resourceManagementEnabled, boo this.pre891Data = pre891Data; } + public void setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + } + + public boolean isResourcesCreated() { + return resourcesCreated; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -66,6 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(resourceManagementEnabled); out.writeBoolean(resourcesCreated); out.writeBoolean(pre891Data); + out.writeBoolean(timedOut); } @Override @@ -76,12 +88,13 @@ public boolean equals(Object o) { return profilingEnabled == response.profilingEnabled && resourceManagementEnabled == response.resourceManagementEnabled && resourcesCreated == response.resourcesCreated - && pre891Data == response.pre891Data; + && pre891Data == response.pre891Data + && timedOut == response.timedOut; } @Override public int hashCode() { - return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data); + return Objects.hash(profilingEnabled, resourceManagementEnabled, resourcesCreated, pre891Data, timedOut); } @Override @@ -89,16 +102,30 @@ public String toString() { return Strings.toString(this, true, true); } + @Override + public RestStatus status() { + return timedOut ? 
RestStatus.REQUEST_TIMEOUT : RestStatus.OK; + } } public static class Request extends AcknowledgedRequest { + private boolean waitForResourcesCreated; public Request(StreamInput in) throws IOException { super(in); + waitForResourcesCreated = in.readBoolean(); } public Request() {} + public boolean waitForResourcesCreated() { + return waitForResourcesCreated; + } + + public void waitForResourcesCreated(boolean waitForResourcesCreated) { + this.waitForResourcesCreated = waitForResourcesCreated; + } + @Override public ActionRequestValidationException validate() { return null; @@ -107,6 +134,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); + out.writeBoolean(waitForResourcesCreated); } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java index 49e436ea4251b..037f57b36d547 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java @@ -33,10 +33,10 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackSettings; @@ -144,6 +144,7 @@ public List getRestHandlers( handlers.add(new RestGetStatusAction()); if (enabled) { handlers.add(new RestGetStackTracesAction()); + handlers.add(new 
RestGetFlamegraphAction()); } return Collections.unmodifiableList(handlers); } @@ -177,6 +178,7 @@ public static ExecutorBuilder responseExecutorBuilder() { public List> getActions() { return List.of( new ActionHandler<>(GetStackTracesAction.INSTANCE, TransportGetStackTracesAction.class), + new ActionHandler<>(GetFlamegraphAction.INSTANCE, TransportGetFlamegraphAction.class), new ActionHandler<>(GetStatusAction.INSTANCE, TransportGetStatusAction.class) ); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Resampler.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Resampler.java new file mode 100644 index 0000000000000..b70807e472536 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/Resampler.java @@ -0,0 +1,56 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.util.Random; +import java.util.random.RandomGenerator; + +class Resampler { + private final boolean requiresResampling; + private final RandomGenerator r; + private final double adjustedSampleRate; + private final double p; + + Resampler(GetStackTracesRequest request, double sampleRate, long totalCount) { + // Manually reduce sample count if totalCount exceeds sampleSize by 10%. + if (totalCount > request.getSampleSize() * 1.1) { + this.requiresResampling = true; + // Make the RNG predictable to get reproducible results. + this.r = createRandom(request); + this.p = (double) request.getSampleSize() / totalCount; + } else { + this.requiresResampling = false; + this.r = null; + this.p = 1.0d; + } + // TODO: Just use the sample rate as is once all resampling is done server-side + this.adjustedSampleRate = request.isAdjustSampleCount() ? 
sampleRate : 1.0d; + } + + protected RandomGenerator createRandom(GetStackTracesRequest request) { + return new Random(request.hashCode()); + } + + public int adjustSampleCount(int originalCount) { + int rawCount; + if (requiresResampling) { + rawCount = 0; + for (int i = 0; i < originalCount; i++) { + if (r.nextDouble() < p) { + rawCount++; + } + } + } else { + rawCount = originalCount; + } + // Adjust the sample counts from down-sampled to fully sampled. + // Be aware that downsampling drops entries from stackTraceEvents, so that + // the sum of the upscaled count values is less that totalCount. + return (int) Math.floor(rawCount / (p * adjustedSampleRate)); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java new file mode 100644 index 0000000000000..a23f501de0602 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java @@ -0,0 +1,45 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestActionListener; +import org.elasticsearch.rest.action.RestCancellableNodeClient; +import org.elasticsearch.rest.action.RestChunkedToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestGetFlamegraphAction extends BaseRestHandler { + @Override + public List routes() { + return List.of(new Route(POST, "/_profiling/flamegraph")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + GetStackTracesRequest getStackTracesRequest = new GetStackTracesRequest(); + request.applyContentParser(getStackTracesRequest::parseXContent); + // enforce server-side adjustment of sample counts for flamegraphs + getStackTracesRequest.setAdjustSampleCount(true); + + return channel -> { + RestActionListener listener = new RestChunkedToXContentListener<>(channel); + RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(GetFlamegraphAction.INSTANCE, getStackTracesRequest, listener); + }; + } + + @Override + public String getName() { + return "get_flamegraph_action"; + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java index c62d6dcad8c1a..714181f3dc0b5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStatusAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import 
org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.rest.action.RestStatusToXContentListener; import java.util.List; @@ -33,6 +33,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient GetStatusAction.Request request = new GetStatusAction.Request(); request.timeout(restRequest.paramAsTime("timeout", request.timeout())); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); - return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestToXContentListener<>(channel)); + request.waitForResourcesCreated(restRequest.paramAsBoolean("wait_for_resources_created", false)); + return channel -> client.execute(GetStatusAction.INSTANCE, request, new RestStatusToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java index 5dc2b212ed55b..eb5134be70adb 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java @@ -12,8 +12,10 @@ import java.io.IOException; import java.util.Collections; +import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.NoSuchElementException; import java.util.Objects; final class StackFrame implements ToXContentObject { @@ -49,6 +51,15 @@ public static StackFrame fromSource(Map source) { ); } + public boolean isEmpty() { + return fileName.isEmpty() && functionName.isEmpty() && functionOffset.isEmpty() && lineNumber.isEmpty(); + } + + public Iterable frames() { + return new Frames(); + + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ 
-79,4 +90,42 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(fileName, functionName, functionOffset, lineNumber); } + + private class Frames implements Iterable { + @Override + public Iterator iterator() { + return new Iterator<>() { + private int currentElement = 0; + + @Override + public boolean hasNext() { + // array lengths might not be consistent - allow to move until all underlying lists have been exhausted + return currentElement < fileName.size() + || currentElement < functionName.size() + || currentElement < functionOffset.size() + || currentElement < lineNumber.size(); + } + + @Override + public Frame next() { + if (hasNext() == false) { + throw new NoSuchElementException(); + } + Frame f = new Frame( + get(fileName, currentElement, ""), + get(functionName, currentElement, ""), + get(functionOffset, currentElement, 0), + get(lineNumber, currentElement, 0), + currentElement > 0 + ); + currentElement++; + return f; + } + }; + } + + private static T get(List l, int index, T defaultValue) { + return index < l.size() ? l.get(index) : defaultValue; + } + } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java new file mode 100644 index 0000000000000..acef2f6661c02 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java @@ -0,0 +1,276 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ParentTaskAssigningClient; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.core.SuppressForbidden; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.io.File; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.SortedMap; +import java.util.TreeMap; + +public class TransportGetFlamegraphAction extends HandledTransportAction { + private static final Logger log = LogManager.getLogger(TransportGetFlamegraphAction.class); + private static final StackFrame EMPTY_STACKFRAME = new StackFrame("", "", 0, 0); + + private final NodeClient nodeClient; + private final TransportService transportService; + + @Inject + public TransportGetFlamegraphAction(NodeClient nodeClient, TransportService transportService, ActionFilters actionFilters) { + super(GetFlamegraphAction.NAME, transportService, actionFilters, GetStackTracesRequest::new); + this.nodeClient = nodeClient; + this.transportService = transportService; + } + + @Override + protected void doExecute(Task task, GetStackTracesRequest request, ActionListener listener) { + Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), task); + long start = System.nanoTime(); + client.execute(GetStackTracesAction.INSTANCE, request, new ActionListener<>() { + @Override + public void onResponse(GetStackTracesResponse response) { + long responseStart = System.nanoTime(); + try { + GetFlamegraphResponse 
flamegraphResponse = buildFlamegraph(response); + log.debug( + "getFlamegraphAction took [" + + (System.nanoTime() - start) / 1_000_000.0d + + "] ms (processing response: [" + + (System.nanoTime() - responseStart) / 1_000_000.0d + + "] ms." + ); + listener.onResponse(flamegraphResponse); + } catch (Exception ex) { + listener.onFailure(ex); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + static GetFlamegraphResponse buildFlamegraph(GetStackTracesResponse response) { + FlamegraphBuilder builder = new FlamegraphBuilder(response.getTotalFrames(), response.getSamplingRate()); + if (response.getTotalFrames() == 0) { + return builder.build(); + } + + SortedMap sortedStacktraces = new TreeMap<>(response.getStackTraces()); + for (Map.Entry st : sortedStacktraces.entrySet()) { + String stackTraceId = st.getKey(); + StackTrace stackTrace = st.getValue(); + int samples = response.getStackTraceEvents().getOrDefault(stackTraceId, 0); + builder.setCurrentNode(0); + builder.addSamplesInclusive(0, samples); + builder.addSamplesExclusive(0, 0); + + int frameCount = stackTrace.frameIds.size(); + for (int i = 0; i < frameCount; i++) { + String frameId = stackTrace.frameIds.get(i); + String fileId = stackTrace.fileIds.get(i); + Integer frameType = stackTrace.typeIds.get(i); + Integer addressOrLine = stackTrace.addressOrLines.get(i); + StackFrame stackFrame = response.getStackFrames().getOrDefault(frameId, EMPTY_STACKFRAME); + String executable = response.getExecutables().getOrDefault(fileId, ""); + + for (Frame frame : stackFrame.frames()) { + String frameGroupId = createFrameGroupId(fileId, addressOrLine, executable, frame.fileName(), frame.functionName()); + + int nodeId; + if (builder.isExists(frameGroupId)) { + nodeId = builder.getNodeId(frameGroupId); + builder.addSamplesInclusive(nodeId, samples); + } else { + nodeId = builder.addNode( + fileId, + frameType, + frame.inline(), + executable, + addressOrLine, + 
frame.functionName(), + frame.functionOffset(), + frame.fileName(), + frame.lineNumber(), + samples, + frameGroupId + ); + } + if (i == frameCount - 1) { + // Leaf frame: sum up counts for exclusive CPU. + builder.addSamplesExclusive(nodeId, samples); + } + builder.setCurrentNode(nodeId); + } + } + } + return builder.build(); + } + + @SuppressForbidden(reason = "Using pathSeparator constant to extract the filename with low overhead") + private static String getFilename(String fullPath) { + if (fullPath == null || fullPath.isEmpty()) { + return fullPath; + } + int lastSeparatorIdx = fullPath.lastIndexOf(File.pathSeparator); + return lastSeparatorIdx == -1 ? fullPath : fullPath.substring(lastSeparatorIdx + 1); + } + + private static String createFrameGroupId( + String fileId, + Integer addressOrLine, + String exeFilename, + String sourceFilename, + String functionName + ) { + StringBuilder sb = new StringBuilder(); + if (functionName.isEmpty()) { + sb.append(fileId); + sb.append(addressOrLine); + } else { + sb.append(exeFilename); + sb.append(functionName); + sb.append(getFilename(sourceFilename)); + } + return sb.toString(); + } + + private static class FlamegraphBuilder { + private int currentNode = 0; + private int size = 0; + // Map: FrameGroupId -> NodeId + private final List> edges; + private final List fileIds; + private final List frameTypes; + private final List inlineFrames; + private final List fileNames; + private final List addressOrLines; + private final List functionNames; + private final List functionOffsets; + private final List sourceFileNames; + private final List sourceLines; + private final List countInclusive; + private final List countExclusive; + private final double samplingRate; + + FlamegraphBuilder(int frames, double samplingRate) { + // as the number of frames does not account for inline frames we slightly overprovision. 
+ int capacity = (int) (frames * 1.1d); + this.edges = new ArrayList<>(capacity); + this.fileIds = new ArrayList<>(capacity); + this.frameTypes = new ArrayList<>(capacity); + this.inlineFrames = new ArrayList<>(capacity); + this.fileNames = new ArrayList<>(capacity); + this.addressOrLines = new ArrayList<>(capacity); + this.functionNames = new ArrayList<>(capacity); + this.functionOffsets = new ArrayList<>(capacity); + this.sourceFileNames = new ArrayList<>(capacity); + this.sourceLines = new ArrayList<>(capacity); + this.countInclusive = new ArrayList<>(capacity); + this.countExclusive = new ArrayList<>(capacity); + if (frames > 0) { + // root node + int nodeId = this.addNode("", 0, false, "", 0, "", 0, "", 0, 0, null); + this.setCurrentNode(nodeId); + } + this.samplingRate = samplingRate; + } + + // returns the new node's id + public int addNode( + String fileId, + int frameType, + boolean inline, + String fileName, + Integer addressOrLine, + String functionName, + int functionOffset, + String sourceFileName, + int sourceLine, + int samples, + String frameGroupId + ) { + int node = this.size; + this.edges.add(new HashMap<>()); + this.fileIds.add(fileId); + this.frameTypes.add(frameType); + this.inlineFrames.add(inline); + this.fileNames.add(fileName); + this.addressOrLines.add(addressOrLine); + this.functionNames.add(functionName); + this.functionOffsets.add(functionOffset); + this.sourceFileNames.add(sourceFileName); + this.sourceLines.add(sourceLine); + this.countInclusive.add(samples); + this.countExclusive.add(0); + if (frameGroupId != null) { + this.edges.get(currentNode).put(frameGroupId, node); + } + this.size++; + return node; + } + + public void setCurrentNode(int nodeId) { + this.currentNode = nodeId; + } + + public boolean isExists(String frameGroupId) { + return this.edges.get(currentNode).containsKey(frameGroupId); + } + + public int getNodeId(String frameGroupId) { + return this.edges.get(currentNode).get(frameGroupId); + } + + public void 
addSamplesInclusive(int nodeId, int sampleCount) { + Integer priorSampleCount = this.countInclusive.get(nodeId); + this.countInclusive.set(nodeId, priorSampleCount + sampleCount); + } + + public void addSamplesExclusive(int nodeId, int sampleCount) { + Integer priorSampleCount = this.countExclusive.get(nodeId); + this.countExclusive.set(nodeId, priorSampleCount + sampleCount); + } + + public GetFlamegraphResponse build() { + return new GetFlamegraphResponse( + size, + samplingRate, + edges, + fileIds, + frameTypes, + inlineFrames, + fileNames, + addressOrLines, + functionNames, + functionOffsets, + sourceFileNames, + sourceLines, + countInclusive, + countExclusive + ); + } + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 60ecd7f845760..3a3c37b04b3d5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -44,7 +44,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Random; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; @@ -155,7 +154,6 @@ private void searchEventGroupByStackTrace( ) { long start = System.nanoTime(); GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(); - int exp = eventsIndex.getExponent(); responseBuilder.setSampleRate(eventsIndex.getSampleRate()); client.prepareSearch(eventsIndex.getName()) .setTrackTotalHits(false) @@ -372,55 +370,6 @@ private void retrieveStackTraceDetails( } } - private static class Resampler { - private final boolean requiresResampling; - - private final Random r; - - private final double sampleRate; - - private final double p; - - 
Resampler(GetStackTracesRequest request, double sampleRate, long totalCount) { - // Manually reduce sample count if totalCount exceeds sampleSize by 10%. - if (totalCount > request.getSampleSize() * 1.1) { - this.requiresResampling = true; - // Make the RNG predictable to get reproducible results. - this.r = new Random(request.hashCode()); - this.sampleRate = sampleRate; - this.p = (double) request.getSampleSize() / totalCount; - } else { - this.requiresResampling = false; - this.r = null; - this.sampleRate = sampleRate; - this.p = 1.0d; - } - } - - public int adjustSampleCount(int originalCount) { - if (requiresResampling) { - int newCount = 0; - for (int i = 0; i < originalCount; i++) { - if (r.nextDouble() < p) { - newCount++; - } - } - if (newCount > 0) { - // Adjust the sample counts from down-sampled to fully sampled. - // Be aware that downsampling drops entries from stackTraceEvents, so that - // the sum of the upscaled count values is less that totalCount. - // This code needs to be refactored to move all scaling into the server - // side, not just the resampling-scaling. - return (int) Math.floor(newCount / (p)); - } else { - return 0; - } - } else { - return originalCount; - } - } - } - /** * Collects stack trace details which are retrieved concurrently and sends a response only when all details are known. 
*/ @@ -458,7 +407,12 @@ public void onStackFramesResponse(MultiGetResponse multiGetItemResponses) { if (frame.getResponse().isExists()) { // Duplicates are expected as we query multiple indices - do a quick pre-check before we deserialize a response if (stackFrames.containsKey(frame.getId()) == false) { - stackFrames.putIfAbsent(frame.getId(), StackFrame.fromSource(frame.getResponse().getSource())); + StackFrame stackFrame = StackFrame.fromSource(frame.getResponse().getSource()); + if (stackFrame.isEmpty() == false) { + stackFrames.putIfAbsent(frame.getId(), stackFrame); + } else { + log.trace("Stack frame with id [{}] has no properties.", frame.getId()); + } } } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java index abac8971596a1..8110cc5e968ec 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStatusAction.java @@ -7,23 +7,33 @@ package org.elasticsearch.xpack.profiling; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.XPackSettings; public class TransportGetStatusAction extends TransportMasterNodeAction { + private static final Logger log = LogManager.getLogger(TransportGetStatusAction.class); + + private final StatusResolver resolver; + @Inject public TransportGetStatusAction( TransportService transportService, @@ -43,6 +53,7 @@ public TransportGetStatusAction( GetStatusAction.Response::new, ThreadPool.Names.SAME ); + this.resolver = new StatusResolver(clusterService); } @Override @@ -52,33 +63,102 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - IndexStateResolver indexStateResolver = new IndexStateResolver(getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES)); - - boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); - boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); - - boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings()); - boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver); - boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver); - boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated; - - boolean indicesPre891 = ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver); - boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver); - boolean anyPre891Data = indicesPre891 || dataStreamsPre891; - listener.onResponse(new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, 
anyPre891Data)); - } - - private boolean getValue(ClusterState state, Setting setting) { - Metadata metadata = state.getMetadata(); - if (metadata.settings().hasValue(setting.getKey())) { - return setting.get(metadata.settings()); + if (request.waitForResourcesCreated()) { + createAndRegisterListener(listener, request.timeout()); } else { - return setting.get(clusterService.getSettings()); + listener.onResponse(resolver.getResponse(state)); } } + private void createAndRegisterListener(ActionListener listener, TimeValue timeout) { + final DiscoveryNode localNode = clusterService.localNode(); + ClusterStateObserver.waitForState( + clusterService, + threadPool.getThreadContext(), + new StatusListener(listener, localNode, clusterService, resolver), + clusterState -> resolver.getResponse(clusterState).isResourcesCreated(), + timeout, + log + ); + } + @Override protected ClusterBlockException checkBlock(GetStatusAction.Request request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } + + private static class StatusListener implements ClusterStateObserver.Listener { + private final ActionListener listener; + private final DiscoveryNode localNode; + + private final ClusterService clusterService; + + private final StatusResolver resolver; + + private StatusListener( + ActionListener listener, + DiscoveryNode localNode, + ClusterService clusterService, + StatusResolver resolver + ) { + this.listener = listener; + this.localNode = localNode; + this.clusterService = clusterService; + this.resolver = resolver; + } + + @Override + public void onNewClusterState(ClusterState state) { + listener.onResponse(resolver.getResponse(state)); + } + + @Override + public void onClusterServiceClose() { + listener.onFailure(new NodeClosedException(localNode)); + } + + @Override + public void onTimeout(TimeValue timeout) { + GetStatusAction.Response response = resolver.getResponse(clusterService.state()); + response.setTimedOut(true); + 
listener.onResponse(response); + } + } + + private static class StatusResolver { + private final ClusterService clusterService; + + private StatusResolver(ClusterService clusterService) { + this.clusterService = clusterService; + } + + private GetStatusAction.Response getResponse(ClusterState state) { + IndexStateResolver indexStateResolver = new IndexStateResolver( + getValue(state, ProfilingPlugin.PROFILING_CHECK_OUTDATED_INDICES) + ); + + boolean pluginEnabled = getValue(state, XPackSettings.PROFILING_ENABLED); + boolean resourceManagementEnabled = getValue(state, ProfilingPlugin.PROFILING_TEMPLATES_ENABLED); + + boolean templatesCreated = ProfilingIndexTemplateRegistry.isAllResourcesCreated(state, clusterService.getSettings()); + boolean indicesCreated = ProfilingIndexManager.isAllResourcesCreated(state, indexStateResolver); + boolean dataStreamsCreated = ProfilingDataStreamManager.isAllResourcesCreated(state, indexStateResolver); + boolean resourcesCreated = templatesCreated && indicesCreated && dataStreamsCreated; + + boolean indicesPre891 = ProfilingIndexManager.isAnyResourceTooOld(state, indexStateResolver); + boolean dataStreamsPre891 = ProfilingDataStreamManager.isAnyResourceTooOld(state, indexStateResolver); + boolean anyPre891Data = indicesPre891 || dataStreamsPre891; + + return new GetStatusAction.Response(pluginEnabled, resourceManagementEnabled, resourcesCreated, anyPre891Data); + } + + private boolean getValue(ClusterState state, Setting setting) { + Metadata metadata = state.getMetadata(); + if (metadata.settings().hasValue(setting.getKey())) { + return setting.get(metadata.settings()); + } else { + return setting.get(clusterService.getSettings()); + } + } + } } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java new file mode 100644 index 0000000000000..79585986c64e2 --- /dev/null +++ 
b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/ResamplerTests.java @@ -0,0 +1,108 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +import java.util.random.RandomGenerator; + +public class ResamplerTests extends ESTestCase { + + private Resampler createResampler(GetStackTracesRequest request, double sampleRate, long totalCount) { + return new Resampler(request, sampleRate, totalCount) { + @Override + protected RandomGenerator createRandom(GetStackTracesRequest request) { + return DeterministicRandom.of(0.0d, 1.0d); + } + }; + } + + public void testNoResamplingNoSampleRateAdjustment() { + // corresponds to profiling-events-5pow01 + double sampleRate = 1.0d / Math.pow(5.0d, 1); + int requestedSamples = 20_000; + int actualTotalSamples = 10_000; + + GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + request.setAdjustSampleCount(false); + + Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); + + int actualSamplesSingleTrace = 5_000; + assertEquals(5_000, resampler.adjustSampleCount(actualSamplesSingleTrace)); + } + + public void testNoResamplingButAdjustSampleRate() { + // corresponds to profiling-events-5pow01 + double sampleRate = 1.0d / Math.pow(5.0d, 1); + int requestedSamples = 20_000; + int actualTotalSamples = 10_000; + + GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + request.setAdjustSampleCount(true); + + Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); + + int actualSamplesSingleTrace = 5_000; + assertEquals(25_000, resampler.adjustSampleCount(actualSamplesSingleTrace)); + } + + public void 
testResamplingNoSampleRateAdjustment() { + // corresponds to profiling-events-5pow01 + double sampleRate = 1.0d / Math.pow(5.0d, 1); + int requestedSamples = 20_000; + int actualTotalSamples = 40_000; + + GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + request.setAdjustSampleCount(false); + + Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); + + int actualSamplesSingleTrace = 20_000; + assertEquals(20_000, resampler.adjustSampleCount(actualSamplesSingleTrace)); + } + + public void testResamplingAndSampleRateAdjustment() { + // corresponds to profiling-events-5pow01 + double sampleRate = 1.0d / Math.pow(5.0d, 1); + int requestedSamples = 20_000; + int actualTotalSamples = 40_000; + + GetStackTracesRequest request = new GetStackTracesRequest(requestedSamples, null); + request.setAdjustSampleCount(true); + + Resampler resampler = createResampler(request, sampleRate, actualTotalSamples); + + int actualSamplesSingleTrace = 20_000; + assertEquals(100_000, resampler.adjustSampleCount(actualSamplesSingleTrace)); + } + + private static class DeterministicRandom implements RandomGenerator { + private final double[] values; + private int idx; + + private DeterministicRandom(double... values) { + this.values = values; + this.idx = 0; + } + + public static RandomGenerator of(double... 
values) { + return new DeterministicRandom(values); + } + + @Override + public long nextLong() { + return Double.doubleToLongBits(nextDouble()); + } + + @Override + public double nextDouble() { + return values[idx++ % values.length]; + } + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java new file mode 100644 index 0000000000000..2adb41ce45038 --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphActionTests.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.test.ESTestCase; + +import java.util.List; +import java.util.Map; + +public class TransportGetFlamegraphActionTests extends ESTestCase { + public void testCreateFlamegraph() { + GetStackTracesResponse stacktraces = new GetStackTracesResponse( + Map.of( + "2buqP1GpF-TXYmL4USW8gA", + new StackTrace( + List.of(12784352, 19334053, 19336161, 18795859, 18622708, 18619213, 12989721, 13658842, 16339645), + List.of( + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w", + "fr28zxcZ2UDasxYuu6dV-w" + ), + List.of( + "fr28zxcZ2UDasxYuu6dV-wAAAAAAwxLg", + "fr28zxcZ2UDasxYuu6dV-wAAAAABJwOl", + "fr28zxcZ2UDasxYuu6dV-wAAAAABJwvh", + "fr28zxcZ2UDasxYuu6dV-wAAAAABHs1T", + "fr28zxcZ2UDasxYuu6dV-wAAAAABHCj0", + "fr28zxcZ2UDasxYuu6dV-wAAAAABHBtN", + "fr28zxcZ2UDasxYuu6dV-wAAAAAAxjUZ", + 
"fr28zxcZ2UDasxYuu6dV-wAAAAAA0Gra", + "fr28zxcZ2UDasxYuu6dV-wAAAAAA-VK9" + ), + List.of(3, 3, 3, 3, 3, 3, 3, 3, 3) + ) + ), + Map.of(), + Map.of("fr28zxcZ2UDasxYuu6dV-w", "containerd"), + Map.of("2buqP1GpF-TXYmL4USW8gA", 1), + 9, + 1.0d + ); + GetFlamegraphResponse response = TransportGetFlamegraphAction.buildFlamegraph(stacktraces); + assertNotNull(response); + assertEquals(10, response.getSize()); + assertEquals(1.0d, response.getSamplingRate(), 0.001d); + assertEquals(List.of(1, 1, 1, 1, 1, 1, 1, 1, 1, 1), response.getCountInclusive()); + assertEquals(List.of(0, 0, 0, 0, 0, 0, 0, 0, 0, 1), response.getCountExclusive()); + + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index f7a1d3e42036a..fd28dd9048275 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -70,10 +70,10 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.snapshots.sourceonly.SourceOnlySnapshotRepository; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java 
b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 61c953e7f1c13..fe799b0a590f2 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -499,6 +499,7 @@ public class Constants { "indices:data/read/mtv[shard]", "indices:data/read/open_point_in_time", "indices:data/read/profiling/stack_traces", + "indices:data/read/profiling/flamegraph", "indices:data/read/rank_eval", "indices:data/read/scroll", "indices:data/read/scroll/clear", diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java index 49ac36b854298..8311d0f613175 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -35,7 +35,6 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; -import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -95,11 +94,11 @@ protected Collection> nodePlugins() { return List.of( LocalStateSecurity.class, DataStreamsPlugin.class, - SystemDataStreamTestPlugin.class, MapperExtrasPlugin.class, Wildcard.class, Downsample.class, - 
AggregateMetricMapperPlugin.class + AggregateMetricMapperPlugin.class, + SystemDataStreamWithDownsamplingConfigurationPlugin.class ); } @@ -135,70 +134,27 @@ public void testDownsamplingAuthorized() throws Exception { waitAndAssertDownsamplingCompleted(dataStreamName); } - public void testConfiguringLifecycleWithDownsamplingForSystemDataStreamFails() { - String dataStreamName = SystemDataStreamTestPlugin.SYSTEM_DATA_STREAM_NAME; - indexDocuments(client(), dataStreamName, 100); - DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() - .downsampling( - new DataStreamLifecycle.Downsampling( - List.of( - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueMillis(0), - new DownsampleConfig(new DateHistogramInterval("5m")) - ), - new DataStreamLifecycle.Downsampling.Round( - TimeValue.timeValueSeconds(10), - new DownsampleConfig(new DateHistogramInterval("10m")) - ) - ) - ) - ) - .build(); - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> client().execute( - PutDataStreamLifecycleAction.INSTANCE, - new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, lifecycle) - ).actionGet() - ); - assertThat( - illegalArgumentException.getMessage(), - is( - "System data streams do not support downsampling as part of their lifecycle " - + "configuration. 
Encountered [" - + dataStreamName - + "] in the request" - ) - ); - } - - public void testExplicitSystemDataStreamConfigurationWithDownsamplingFails() { - SystemDataStreamWithDownsamplingConfigurationPlugin pluginWithIllegalSystemDataStream = - new SystemDataStreamWithDownsamplingConfigurationPlugin(); - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> pluginWithIllegalSystemDataStream.getSystemDataStreamDescriptors() - ); - assertThat( - illegalArgumentException.getMessage(), - is("System data streams do not support downsampling as part of their lifecycle configuration") - ); + @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") + public void testSystemDataStreamConfigurationWithDownsampling() throws Exception { + String dataStreamName = SystemDataStreamWithDownsamplingConfigurationPlugin.SYSTEM_DATA_STREAM_NAME; + indexDocuments(client(), dataStreamName, 10_000); + waitAndAssertDownsamplingCompleted(dataStreamName); } private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Exception { List backingIndices = getDataStreamBackingIndices(dataStreamName); String firstGenerationBackingIndex = backingIndices.get(0).getName(); - String oneSecondDownsampleIndex = "downsample-5m-" + firstGenerationBackingIndex; - String tenSecondsDownsampleIndex = "downsample-10m-" + firstGenerationBackingIndex; + String firstRoundDownsamplingIndex = "downsample-5m-" + firstGenerationBackingIndex; + String secondRoundDownsamplingIndex = "downsample-10m-" + firstGenerationBackingIndex; Set witnessedDownsamplingIndices = new HashSet<>(); clusterService().addListener(event -> { - if (event.indicesCreated().contains(oneSecondDownsampleIndex) - || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { - witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); + if (event.indicesCreated().contains(firstRoundDownsamplingIndex) 
+ || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(firstRoundDownsamplingIndex))) { + witnessedDownsamplingIndices.add(firstRoundDownsamplingIndex); } - if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { - witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); + if (event.indicesCreated().contains(secondRoundDownsamplingIndex)) { + witnessedDownsamplingIndices.add(secondRoundDownsamplingIndex); } }); @@ -207,15 +163,15 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex assertBusy(() -> { assertNoAuthzErrors(); // first downsampling round - assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(firstRoundDownsamplingIndex), is(true)); }, 30, TimeUnit.SECONDS); assertBusy(() -> { assertNoAuthzErrors(); assertThat(witnessedDownsamplingIndices.size(), is(2)); - assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(firstRoundDownsamplingIndex), is(true)); - assertThat(witnessedDownsamplingIndices.contains(tenSecondsDownsampleIndex), is(true)); + assertThat(witnessedDownsamplingIndices.contains(secondRoundDownsamplingIndex), is(true)); }, 30, TimeUnit.SECONDS); assertBusy(() -> { @@ -226,9 +182,9 @@ private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Ex String writeIndex = dsBackingIndices.get(1).getName(); assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); // the last downsampling round must remain in the data stream - assertThat(dsBackingIndices.get(0).getName(), is(tenSecondsDownsampleIndex)); + assertThat(dsBackingIndices.get(0).getName(), is(secondRoundDownsamplingIndex)); assertThat(indexExists(firstGenerationBackingIndex), is(false)); - assertThat(indexExists(oneSecondDownsampleIndex), is(false)); + assertThat(indexExists(firstRoundDownsamplingIndex), is(false)); }, 30, 
TimeUnit.SECONDS); } @@ -378,55 +334,6 @@ private void bulkIndex(Client client, String dataStreamName, Supplier Indexed [{}] documents. Dropped [{}] duplicates.", docsIndexed, duplicates); } - public static class SystemDataStreamTestPlugin extends Plugin implements SystemIndexPlugin { - - static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; - - @Override - public Collection getSystemDataStreamDescriptors() { - Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) - .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); - - try { - return List.of( - new SystemDataStreamDescriptor( - SYSTEM_DATA_STREAM_NAME, - "a system data stream for testing", - SystemDataStreamDescriptor.Type.EXTERNAL, - new ComposableIndexTemplate( - List.of(SYSTEM_DATA_STREAM_NAME), - new Template(settings.build(), getTSDBMappings(), null, null), - null, - null, - null, - null, - new ComposableIndexTemplate.DataStreamTemplate() - ), - Map.of(), - Collections.singletonList("test"), - new ExecutorNames( - ThreadPool.Names.SYSTEM_CRITICAL_READ, - ThreadPool.Names.SYSTEM_READ, - ThreadPool.Names.SYSTEM_WRITE - ) - ) - ); - } catch (IOException e) { - throw new RuntimeException("Unable to create system data stream descriptor", e); - } - } - - @Override - public String getFeatureName() { - return SystemDataStreamTestPlugin.class.getSimpleName(); - } - - @Override - public String getFeatureDescription() { - return "A plugin for testing the data stream lifecycle runtime actions on system data streams"; - } - } - public static class SystemDataStreamWithDownsamplingConfigurationPlugin extends Plugin implements SystemIndexPlugin { static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; @@ -484,7 +391,7 @@ public Collection getSystemDataStreamDescriptors() { @Override public String getFeatureName() { - return SystemDataStreamTestPlugin.class.getSimpleName(); + return 
SystemDataStreamWithDownsamplingConfigurationPlugin.class.getSimpleName(); } @Override diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java index 1daff64f57fcc..1776b3bfd3c36 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/test/SecuritySingleNodeTestCase.java @@ -277,6 +277,7 @@ protected boolean isTransportSSLEnabled() { * Creates a new client if the method is invoked for the first time in the context of the current test scope. * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise * it cannot be reused by other tests anymore. + * Requires that {@link org.elasticsearch.test.ESSingleNodeTestCase#addMockHttpTransport()} is overridden and set to false.
*/ protected RestClient getRestClient() { return getRestClient(client()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java index 26e08fd34bdec..8843ccd3ffe15 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmSingleNodeTests.java @@ -7,31 +7,55 @@ package org.elasticsearch.xpack.security.authc.jwt; +import com.nimbusds.jose.JWSAlgorithm; import com.nimbusds.jose.JWSHeader; +import com.nimbusds.jose.crypto.MACSigner; +import com.nimbusds.jose.jwk.OctetSequenceKey; import com.nimbusds.jose.util.Base64URL; import com.nimbusds.jwt.JWTClaimsSet; import com.nimbusds.jwt.SignedJWT; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Strings; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.xpack.core.security.authc.Realm; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.authc.Realms; +import java.nio.charset.StandardCharsets; import 
java.text.ParseException; import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; +import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; public class JwtRealmSingleNodeTests extends SecuritySingleNodeTestCase { + private final String jwt0SharedSecret = "jwt0_shared_secret"; + private final String jwt1SharedSecret = "jwt1_shared_secret"; + private final String jwt2SharedSecret = "jwt2_shared_secret"; + private final String jwtHmacKey = "test-HMAC/secret passphrase-value"; + @Override protected Settings nodeSettings() { final Settings.Builder builder = Settings.builder() @@ -59,6 +83,7 @@ protected Settings nodeSettings() { .put("xpack.security.authc.realms.jwt.jwt1.claims.principal", "appid") .put("xpack.security.authc.realms.jwt.jwt1.claims.groups", "groups") .put("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "shared_secret") + .put("xpack.security.authc.realms.jwt.jwt1.client_authentication.rotation_grace_period", "10m") .putList("xpack.security.authc.realms.jwt.jwt1.allowed_signature_algorithms", "HS256", "HS384") // 3rd JWT realm .put("xpack.security.authc.realms.jwt.jwt2.order", 30) @@ -70,20 +95,25 @@ protected Settings nodeSettings() { .put("xpack.security.authc.realms.jwt.jwt2.claims.principal", "email") .put("xpack.security.authc.realms.jwt.jwt2.claims.groups", "groups") .put("xpack.security.authc.realms.jwt.jwt2.client_authentication.type", "shared_secret") + .put("xpack.security.authc.realms.jwt.jwt2.client_authentication.rotation_grace_period", "0s") .putList("xpack.security.authc.realms.jwt.jwt2.allowed_signature_algorithms", "HS256", "HS384"); 
SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { - secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.hmac_key", "jwt0_hmac_key"); - secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.client_authentication.shared_secret", "jwt0_shared_secret"); - secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.hmac_key", "jwt1_hmac_key"); - secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.client_authentication.shared_secret", "jwt1_shared_secret"); - secureSettings.setString("xpack.security.authc.realms.jwt.jwt2.hmac_key", "jwt2_hmac_key"); - secureSettings.setString("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", "jwt2_shared_secret"); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.hmac_key", jwtHmacKey); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt0.client_authentication.shared_secret", jwt0SharedSecret); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.hmac_key", jwtHmacKey); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt1.client_authentication.shared_secret", jwt1SharedSecret); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt2.hmac_key", jwtHmacKey); + secureSettings.setString("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", jwt2SharedSecret); }); return builder.build(); } + protected boolean addMockHttpTransport() { + return false; + } + public void testAnyJwtRealmWillExtractTheToken() throws ParseException { final List jwtRealms = getJwtRealms(); final JwtRealm jwtRealm = randomFrom(jwtRealms); @@ -172,6 +202,132 @@ public void testJwtRealmThrowsErrorOnJwtParsingFailure() throws ParseException { assertThat(e2.getMessage(), containsString("Failed to parse JWT claims set")); } + @TestLogging(value = "org.elasticsearch.xpack.security.authc.jwt:DEBUG", reason = "failures can be very difficult to troubleshoot") + public void testClientSecretRotation() throws Exception { 
+ final List jwtRealms = getJwtRealms(); + Map realmsByName = jwtRealms.stream().collect(Collectors.toMap(Realm::name, r -> r)); + JwtRealm realm0 = realmsByName.get("jwt0"); + JwtRealm realm1 = realmsByName.get("jwt1"); + JwtRealm realm2 = realmsByName.get("jwt2"); + // sanity check + assertThat(getGracePeriod(realm0), equalTo(CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD.getDefault(Settings.EMPTY))); + assertThat(getGracePeriod(realm1), equalTo(TimeValue.timeValueMinutes(10))); + assertThat(getGracePeriod(realm2), equalTo(TimeValue.timeValueSeconds(0))); + // create claims and test before rotation + RestClient client = getRestClient(); + // valid jwt for realm0 + JWTClaimsSet.Builder jwt0Claims = new JWTClaimsSet.Builder(); + jwt0Claims.audience("es-01") + .issuer("my-issuer-01") + .subject("me") + .claim("groups", "admin") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(600))); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() + ); + // valid jwt for realm1 + JWTClaimsSet.Builder jwt1Claims = new JWTClaimsSet.Builder(); + jwt1Claims.audience("es-02") + .issuer("my-issuer-02") + .subject("user-02") + .claim("groups", "admin") + .claim("appid", "X") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(300))); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() + ); + // valid jwt for realm2 + JWTClaimsSet.Builder jwt2Claims = new JWTClaimsSet.Builder(); + jwt2Claims.audience("es-03") + .issuer("my-issuer-03") + .subject("user-03") + .claim("groups", "admin") + .claim("email", "me@example.com") + .issueTime(Date.from(Instant.now())) + .expirationTime(Date.from(Instant.now().plusSeconds(300))); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), 
jwt2SharedSecret)).getStatusLine().getStatusCode() + ); + // update the secret in the secure settings + final MockSecureSettings newSecureSettings = new MockSecureSettings(); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm0.name() + ".client_authentication.shared_secret", + "realm0updatedSecret" + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm1.name() + ".client_authentication.shared_secret", + "realm1updatedSecret" + ); + newSecureSettings.setString( + "xpack.security.authc.realms.jwt." + realm2.name() + ".client_authentication.shared_secret", + "realm2updatedSecret" + ); + // reload settings + final PluginsService plugins = getInstanceFromNode(PluginsService.class); + final LocalStateSecurity localStateSecurity = plugins.filterPlugins(LocalStateSecurity.class).get(0); + for (Plugin p : localStateSecurity.plugins()) { + if (p instanceof Security securityPlugin) { + Settings.Builder newSettingsBuilder = Settings.builder().setSecureSettings(newSecureSettings); + securityPlugin.reload(newSettingsBuilder.build()); + } + } + // ensure the old value still works for realm 0 (default grace period) + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), jwt0SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt0Claims.build()), "realm0updatedSecret")).getStatusLine().getStatusCode() + ); + // ensure the old value still works for realm 1 (explicit grace period) + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), jwt1SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt1Claims.build()), "realm1updatedSecret")).getStatusLine().getStatusCode() + ); + // ensure the old value does not work for realm 2 (no grace period) + ResponseException exception = expectThrows( + ResponseException.class, + () -> 
client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), jwt2SharedSecret)).getStatusLine().getStatusCode() + ); + assertEquals(401, exception.getResponse().getStatusLine().getStatusCode()); + assertEquals( + 200, + client.performRequest(getRequest(getSignedJWT(jwt2Claims.build()), "realm2updatedSecret")).getStatusLine().getStatusCode() + ); + } + + private SignedJWT getSignedJWT(JWTClaimsSet claimsSet) throws Exception { + JWSHeader jwtHeader = new JWSHeader.Builder(JWSAlgorithm.HS256).build(); + OctetSequenceKey.Builder jwt0signer = new OctetSequenceKey.Builder(jwtHmacKey.getBytes(StandardCharsets.UTF_8)); + jwt0signer.algorithm(JWSAlgorithm.HS256); + SignedJWT jwt = new SignedJWT(jwtHeader, claimsSet); + jwt.sign(new MACSigner(jwt0signer.build())); + return jwt; + } + + private Request getRequest(SignedJWT jwt, String shardSecret) { + Request request = new Request("GET", "/_security/_authenticate"); + RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.addHeader("Authorization", "Bearer " + jwt.serialize()); + options.addHeader("ES-Client-Authentication", "SharedSecret " + shardSecret); + request.setOptions(options); + return request; + } + + private TimeValue getGracePeriod(JwtRealm realm) { + return realm.getConfig().getConcreteSetting(CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD).get(realm.getConfig().settings()); + } + private void assertJwtToken(JwtAuthenticationToken token, String tokenPrincipal, String sharedSecret, SignedJWT signedJWT) throws ParseException { assertThat(token.principal(), equalTo(tokenPrincipal)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java index eac48d3fe7950..26112ed11231f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/PkiRealmBootstrapCheck.java @@ -8,6 +8,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.xpack.core.XPackSettings; @@ -78,4 +79,9 @@ private List getSslContextNames(Settings settings) { public boolean alwaysEnforce() { return true; } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_PKI_REALM; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index bf4f72fbb8a98..9a16785c39dfc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -77,6 +77,7 @@ import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.NetworkPlugin; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.plugins.interceptor.RestServerActionPlugin; @@ -89,10 +90,10 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; import 
org.elasticsearch.transport.TransportInterceptor; @@ -178,6 +179,7 @@ import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.RealmSettings; +import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; @@ -405,6 +407,7 @@ import static org.elasticsearch.xpack.core.XPackSettings.API_KEY_SERVICE_ENABLED_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.core.security.SecurityField.FIELD_LEVEL_SECURITY_FEATURE; +import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTHENTICATION_SHARED_SECRET; import static org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore.INCLUDED_RESERVED_ROLES_SETTING; import static org.elasticsearch.xpack.security.operator.OperatorPrivileges.OPERATOR_PRIVILEGES_ENABLED; import static org.elasticsearch.xpack.security.transport.SSLEngineUtils.extractClientCertificates; @@ -419,7 +422,8 @@ public class Security extends Plugin MapperPlugin, ExtensiblePlugin, SearchPlugin, - RestServerActionPlugin { + RestServerActionPlugin, + ReloadablePlugin { public static final String SECURITY_CRYPTO_THREAD_POOL_NAME = XPackField.SECURITY + "-crypto"; @@ -545,7 +549,6 @@ public class Security extends Plugin private final SetOnce tokenService = new SetOnce<>(); private final SetOnce securityActionFilter = new SetOnce<>(); private final SetOnce crossClusterAccessAuthcService = new SetOnce<>(); - private final SetOnce sharedGroupFactory = new SetOnce<>(); private final SetOnce dlsBitsetCache = new SetOnce<>(); private final SetOnce> bootstrapChecks = new SetOnce<>(); @@ -554,10 +557,9 @@ 
public class Security extends Plugin private final SetOnce scriptServiceReference = new SetOnce<>(); private final SetOnce operatorOnlyRegistry = new SetOnce<>(); private final SetOnce operatorPrivilegesService = new SetOnce<>(); - private final SetOnce reservedRoleMappingAction = new SetOnce<>(); - private final SetOnce workflowService = new SetOnce<>(); + private final SetOnce realms = new SetOnce<>(); public Security(Settings settings) { this(settings, Collections.emptyList()); @@ -771,6 +773,7 @@ Collection createComponents( components.add(nativeRoleMappingStore); components.add(realms); components.add(reservedRealm); + this.realms.set(realms); systemIndices.getMainIndexManager().addStateListener(nativeRoleMappingStore::onSecurityIndexStateChange); @@ -1900,6 +1903,19 @@ public BiConsumer getJoinValidator() { return null; } + @Override + public void reload(Settings settings) throws Exception { + if (enabled) { + realms.get().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { + if (realm instanceof JwtRealm jwtRealm) { + jwtRealm.rotateClientSecret( + CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settings) + ); + } + }); + } + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java index ab9aa32c9b859..c6396f886b4bc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheck.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import 
org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.env.NodeMetadata; import org.elasticsearch.license.ClusterStateLicenseService; import org.elasticsearch.license.License; @@ -44,11 +45,9 @@ public BootstrapCheckResult check(BootstrapContext context) { + "] has changed in the current version. " + " Security features were implicitly disabled for this node but they would now be enabled, possibly" + " preventing access to the node. " - + "See https://www.elastic.co/guide/en/elasticsearch/reference/" - + Version.CURRENT.major - + "." - + Version.CURRENT.minor - + "/security-minimal-setup.html to configure security, or explicitly disable security by " + + "See " + + this.referenceDocs() + + " to configure security, or explicitly disable security by " + "setting [xpack.security.enabled] to \"false\" in elasticsearch.yml before restarting the node." ); } @@ -59,4 +58,9 @@ public BootstrapCheckResult check(BootstrapContext context) { public boolean alwaysEnforce() { return true; } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java index 1c2fbb3df425b..7611ef8d258ce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/TokenSSLBootstrapCheck.java @@ -8,6 +8,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.xpack.core.XPackSettings; import java.util.Locale; @@ -35,4 +36,8 @@ public BootstrapCheckResult 
check(BootstrapContext context) { } } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_TOKEN_SSL; + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index 41795bbb62010..5300af37ed7da 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.cache.CacheBuilder; +import org.elasticsearch.common.settings.RotatableSecret; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.util.concurrent.ReleasableLock; @@ -49,6 +50,7 @@ import static java.lang.String.join; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings.CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD; /** * JWT realms supports JWTs as bearer tokens for authenticating to Elasticsearch. 
@@ -71,7 +73,7 @@ public class JwtRealm extends Realm implements CachingRealm, Releasable { private final ClaimParser claimParserMail; private final ClaimParser claimParserName; private final JwtRealmSettings.ClientAuthenticationType clientAuthenticationType; - private final SecureString clientAuthenticationSharedSecret; + private final RotatableSecret clientAuthenticationSharedSecret; private final JwtAuthenticator jwtAuthenticator; private final TimeValue allowedClockSkew; DelegatedAuthorizationSupport delegatedAuthorizationSupport = null; @@ -86,9 +88,9 @@ public JwtRealm(final RealmConfig realmConfig, final SSLService sslService, fina this.populateUserMetadata = realmConfig.getSetting(JwtRealmSettings.POPULATE_USER_METADATA); this.clientAuthenticationType = realmConfig.getSetting(JwtRealmSettings.CLIENT_AUTHENTICATION_TYPE); - final SecureString sharedSecret = realmConfig.getSetting(JwtRealmSettings.CLIENT_AUTHENTICATION_SHARED_SECRET); - this.clientAuthenticationSharedSecret = Strings.hasText(sharedSecret) ? sharedSecret : null; // convert "" to null - + this.clientAuthenticationSharedSecret = new RotatableSecret( + realmConfig.getSetting(JwtRealmSettings.CLIENT_AUTHENTICATION_SHARED_SECRET) + ); // Validate Client Authentication settings. Throw SettingsException there was a problem. JwtUtil.validateClientAuthenticationSettings( RealmSettings.getFullSettingKey(realmConfig, JwtRealmSettings.CLIENT_AUTHENTICATION_TYPE), @@ -443,6 +445,15 @@ public void usageStats(final ActionListener> listener) { }, listener::onFailure)); } + public void rotateClientSecret(SecureString clientSecret) { + this.clientAuthenticationSharedSecret.rotate(clientSecret, config.getSetting(CLIENT_AUTH_SHARED_SECRET_ROTATION_GRACE_PERIOD)); + } + + // package private for testing + RotatableSecret getClientAuthenticationSharedSecret() { + return clientAuthenticationSharedSecret; + } + /** * Clean up JWT cache (if enabled). 
*/ diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java index 65c4025b4ad85..3e3533f028b38 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtil.java @@ -35,6 +35,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressLoggerChecks; import org.elasticsearch.common.hash.MessageDigests; +import org.elasticsearch.common.settings.RotatableSecret; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.ssl.SslConfiguration; @@ -99,12 +100,12 @@ public static void validateClientAuthenticationSettings( final String clientAuthenticationTypeConfigKey, final JwtRealmSettings.ClientAuthenticationType clientAuthenticationType, final String clientAuthenticationSharedSecretConfigKey, - final SecureString clientAuthenticationSharedSecret + final RotatableSecret clientAuthenticationSharedSecret ) throws SettingsException { switch (clientAuthenticationType) { case SHARED_SECRET: // If type is "SharedSecret", the shared secret value must be set - if (Strings.hasText(clientAuthenticationSharedSecret) == false) { + if (clientAuthenticationSharedSecret.isSet() == false) { throw new SettingsException( "Missing setting for [" + clientAuthenticationSharedSecretConfigKey @@ -119,7 +120,7 @@ public static void validateClientAuthenticationSettings( case NONE: default: // If type is "None", the shared secret value must not be set - if (Strings.hasText(clientAuthenticationSharedSecret)) { + if (clientAuthenticationSharedSecret.isSet()) { throw new SettingsException( "Setting [" + clientAuthenticationSharedSecretConfigKey @@ -141,7 +142,7 @@ public static void 
validateClientAuthenticationSettings( public static void validateClientAuthentication( final JwtRealmSettings.ClientAuthenticationType type, - final SecureString expectedSecret, + final RotatableSecret expectedSecret, final SecureString actualSecret, final String tokenPrincipal ) throws Exception { @@ -149,7 +150,7 @@ public static void validateClientAuthentication( case SHARED_SECRET: if (Strings.hasText(actualSecret) == false) { throw new Exception("Rejected client. Authentication type is [" + type + "] and secret is missing."); - } else if (expectedSecret.equals(actualSecret) == false) { + } else if (expectedSecret.matches(actualSecret) == false) { throw new Exception("Rejected client. Authentication type is [" + type + "] and secret did not match."); } LOGGER.trace("Accepted client for token [{}]. Authentication type is [{}] and secret matched.", tokenPrincipal, type); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java index b76124d5c4631..d70552f016bbf 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/RoleMappingFileBootstrapCheck.java @@ -9,6 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; @@ -51,4 +52,8 @@ public static BootstrapCheck create(RealmConfig realmConfig) { return null; } + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_ROLE_MAPPINGS; + } } 
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java index 42c6a52fd4d52..63ffc82ef287d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java @@ -126,7 +126,7 @@ protected Class> return SecurityTransportXPackInfoAction.class; } - List plugins() { + public List plugins() { return plugins; } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 3a1ae84b7c682..9775e461c4165 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.Version; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.NodeMetadata; @@ -53,11 +54,9 @@ public void testFailureUpgradeFrom7xWithImplicitSecuritySettings() throws Except + "] has changed in the current version. " + " Security features were implicitly disabled for this node but they would now be enabled, possibly" + " preventing access to the node. " - + "See https://www.elastic.co/guide/en/elasticsearch/reference/" - + Version.CURRENT.major - + "." 
- + Version.CURRENT.minor - + "/security-minimal-setup.html to configure security, or explicitly disable security by " + + "See " + + ReferenceDocs.BOOTSTRAP_CHECK_SECURITY_MINIMAL_SETUP + + " to configure security, or explicitly disable security by " + "setting [xpack.security.enabled] to \"false\" in elasticsearch.yml before restarting the node." ) ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 7f174ee26cf0b..88725e015e511 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.MockLogAppender; @@ -61,7 +62,6 @@ import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.usage.UsageService; import org.elasticsearch.watcher.ResourceWatcherService; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 5faeb02f7029f..4c276993381b5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -60,6 +60,7 @@ import 
org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.node.Node; import org.elasticsearch.rest.RestStatus; @@ -1271,7 +1272,7 @@ private static DiscoveryNode addAnotherDataNodeWithVersion( discoBuilder.add(anotherDataNode); final ClusterState.Builder newStateBuilder = ClusterState.builder(currentState); newStateBuilder.nodes(discoBuilder); - newStateBuilder.putTransportVersion(anotherDataNode.getId(), transportVersion); + newStateBuilder.putCompatibilityVersions(anotherDataNode.getId(), transportVersion, SystemIndices.SERVER_SYSTEM_MAPPINGS_VERSIONS); setState(clusterService, newStateBuilder.build()); return anotherDataNode; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java index 161e263e99784..6fab33b4d6adf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtUtilTests.java @@ -6,6 +6,7 @@ */ package org.elasticsearch.xpack.security.authc.jwt; +import org.elasticsearch.common.settings.RotatableSecret; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings; @@ -28,7 +29,7 @@ public void testClientAuthenticationTypeValidation() { clientAuthenticationTypeKey, JwtRealmSettings.ClientAuthenticationType.NONE, clientAuthenticationSharedSecretKey, - sharedSecretNullOrEmpty + new RotatableSecret(sharedSecretNullOrEmpty) ); // If type is None, verify non-empty is rejected final Exception exception1 = expectThrows( @@ -37,7 
+38,7 @@ public void testClientAuthenticationTypeValidation() { clientAuthenticationTypeKey, JwtRealmSettings.ClientAuthenticationType.NONE, clientAuthenticationSharedSecretKey, - sharedSecretNonEmpty + new RotatableSecret(sharedSecretNonEmpty) ) ); assertThat( @@ -60,7 +61,7 @@ public void testClientAuthenticationTypeValidation() { clientAuthenticationTypeKey, JwtRealmSettings.ClientAuthenticationType.SHARED_SECRET, clientAuthenticationSharedSecretKey, - sharedSecretNonEmpty + new RotatableSecret(sharedSecretNonEmpty) ); // If type is SharedSecret, verify null or empty is rejected final Exception exception2 = expectThrows( @@ -69,7 +70,7 @@ public void testClientAuthenticationTypeValidation() { clientAuthenticationTypeKey, JwtRealmSettings.ClientAuthenticationType.SHARED_SECRET, clientAuthenticationSharedSecretKey, - sharedSecretNullOrEmpty + new RotatableSecret(sharedSecretNullOrEmpty) ) ); assertThat( diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java index df6ba3abda55d..c87ddd116b138 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java @@ -22,9 +22,9 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.tasks.TaskManager; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.RemoteClusterPortSettings; import org.elasticsearch.transport.RequestHandlerRegistry; import 
org.elasticsearch.transport.TcpHeader; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java index d0cba6b2381d4..4b180548bfba4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HttpServerTransportTests.java @@ -44,10 +44,10 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.transport.netty4.TLSConfig; import org.elasticsearch.xpack.core.XPackSettings; diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java index eeb766ff70c92..c8838194f461d 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java @@ -40,9 +40,9 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESIntegTestCase; import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ObjectParser; diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java index 3b9b7a696c620..55af966b4d7e1 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/ShutdownPlugin.java @@ -28,8 +28,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index cb147ab0af345..aa39ec1968ff0 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -37,8 +37,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java index 6e788c0ebcdc2..6b0500162b567 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -31,8 +31,8 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.XPackPlugin; diff --git a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java index 35f651f91ccb7..8c16ad6057706 100644 --- a/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java +++ b/x-pack/plugin/stack/src/main/java/org/elasticsearch/xpack/stack/StackPlugin.java @@ -20,8 +20,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java index 169ae7b04787f..521a1deafe797 100644 --- 
a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java @@ -125,7 +125,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req testIndices = testIndicesList.toArray(new String[0]); clusterStateWithIndex = ClusterState.builder(ClusterStateCreationUtils.state(numberOfNodes, testIndices, numberOfShards)) - .putTransportVersion("node01", TransportVersions.V_8_5_0) + .putCompatibilityVersions("node01", TransportVersions.V_8_5_0, Map.of()) .build(); transformTask = new Task( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 6eebc97541123..05546d9b1345b 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -49,8 +49,8 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.NamedXContentRegistry.Entry; diff --git a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java index 81adf69aa47e4..b6b3a709924aa 100644 --- 
a/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java +++ b/x-pack/plugin/voting-only-node/src/main/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePlugin.java @@ -36,8 +36,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportInterceptor; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java index 555787f577efe..430d6985e3444 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/EncryptSensitiveDataBootstrapCheck.java @@ -8,6 +8,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.bootstrap.BootstrapContext; +import org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.xpack.core.XPackPlugin; import org.elasticsearch.xpack.core.watcher.WatcherField; @@ -50,4 +51,9 @@ public BootstrapCheckResult check(BootstrapContext context) { public boolean alwaysEnforce() { return true; } + + @Override + public ReferenceDocs referenceDocs() { + return ReferenceDocs.BOOTSTRAP_CHECK_ENCRYPT_SENSITIVE_DATA; + } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index afcc8a2097e94..e9a8e04b57c73 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -54,10 +54,10 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java index 59aac833111c0..e70e1ba349086 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/common/http/HttpClient.java @@ -72,6 +72,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -274,17 +275,22 @@ public HttpResponse execute(HttpRequest request) throws IOException { // headers Header[] headers = response.getAllHeaders(); Map responseHeaders = Maps.newMapWithExpectedSize(headers.length); + /* + * Headers are not case sensitive, so in the following loop we lowercase all of them. We also roll up all values for the same + * case-insensitive header into a list. 
+ */ for (Header header : headers) { - if (responseHeaders.containsKey(header.getName())) { - String[] old = responseHeaders.get(header.getName()); + String lowerCaseHeaderName = header.getName().toLowerCase(Locale.ROOT); + if (responseHeaders.containsKey(lowerCaseHeaderName)) { + String[] old = responseHeaders.get(lowerCaseHeaderName); String[] values = new String[old.length + 1]; System.arraycopy(old, 0, values, 0, old.length); values[values.length - 1] = header.getValue(); - responseHeaders.put(header.getName(), values); + responseHeaders.put(lowerCaseHeaderName, values); } else { - responseHeaders.put(header.getName(), new String[] { header.getValue() }); + responseHeaders.put(lowerCaseHeaderName, new String[] { header.getValue() }); } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java index 5b40bac54ede6..a427b8bfcfee2 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherPluginTests.java @@ -15,10 +15,10 @@ import org.elasticsearch.indices.SystemIndexDescriptor; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.plugins.internal.DocumentParsingObserver; +import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.threadpool.ExecutorBuilder; -import org.elasticsearch.tracing.Tracer; import org.elasticsearch.xpack.core.watcher.watch.Watch; import org.elasticsearch.xpack.watcher.notification.NotificationService; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java index 
2a6138380afff..0aac3cb4463e4 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/common/http/HttpClientTests.java @@ -500,6 +500,7 @@ public void testThatDuplicateHeaderKeysAreReturned() throws Exception { .setBody("foo") .addHeader("foo", "bar") .addHeader("foo", "baz") + .addHeader("Foo", "bam") .addHeader("Content-Length", "3"); webServer.enqueue(mockResponse); @@ -509,7 +510,7 @@ public void testThatDuplicateHeaderKeysAreReturned() throws Exception { assertThat(webServer.requests(), hasSize(1)); assertThat(httpResponse.headers(), hasKey("foo")); - assertThat(httpResponse.headers().get("foo"), containsInAnyOrder("bar", "baz")); + assertThat(httpResponse.headers().get("foo"), containsInAnyOrder("bar", "baz", "bam")); } // finally fixing https://github.com/elastic/x-plugins/issues/1141 - yay! Fixed due to switching to apache http client internally! diff --git a/x-pack/qa/core-rest-tests-with-security/src/yamlRestTest/resources/rest-api-spec/test/stack/10_stack.yml b/x-pack/qa/core-rest-tests-with-security/src/yamlRestTest/resources/rest-api-spec/test/stack/10_stack.yml index 47913dd98edd0..3346f5c8e58bd 100644 --- a/x-pack/qa/core-rest-tests-with-security/src/yamlRestTest/resources/rest-api-spec/test/stack/10_stack.yml +++ b/x-pack/qa/core-rest-tests-with-security/src/yamlRestTest/resources/rest-api-spec/test/stack/10_stack.yml @@ -1,4 +1,7 @@ "Stack templates can be disabled": + - skip: + version: all + reason: https://github.com/elastic/elasticsearch/issues/98163 - do: cluster.put_settings: body: