Skip to content

Commit

Permalink
Merge branch '8.x' into fix-upgrade-test
Browse files Browse the repository at this point in the history
  • Loading branch information
elasticmachine authored Nov 11, 2024
2 parents 8b1a9da + 80c151b commit e85da97
Show file tree
Hide file tree
Showing 122 changed files with 6,081 additions and 860 deletions.
1 change: 1 addition & 0 deletions .ci/dockerOnLinuxExclusions
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ sles-15.2
sles-15.3
sles-15.4
sles-15.5
sles-15.6

# These OSes are deprecated and filtered starting with 8.0.0, but need to be excluded
# for PR checks
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.benchmark.indices.resolution;

import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.indices.SystemIndices;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * JMH benchmark measuring how quickly {@link IndexNameExpressionResolver} turns index
 * expressions (concrete index names, data stream names, and wildcards) into concrete
 * index names, across cluster states of varying size.
 */
@State(Scope.Benchmark)
@Fork(3)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@SuppressWarnings("unused") // invoked by benchmarking framework
public class IndexNameExpressionResolverBenchmark {

    private static final String DATA_STREAM_PREFIX = "my-ds-";
    private static final String INDEX_PREFIX = "my-index-";

    // Cluster shape under test, encoded as "<data stream count>|<index count>".
    @Param(
        {
            // # data streams | # indices
            " 1000| 100",
            " 5000| 500",
            " 10000| 1000" }
    )
    public String resourceMix = "100|10";

    private IndexNameExpressionResolver resolver;
    private ClusterState clusterState;
    private Request starRequest;
    private Request indexListRequest;
    private Request mixedRequest;

    /**
     * Builds a cluster state containing {@code indexCount} plain indices plus
     * {@code dataStreamCount} data streams (each backed by {@code indexCount} indices),
     * and prepares the three request flavors exercised by the benchmarks.
     */
    @Setup
    public void setUp() {
        final String[] parts = resourceMix.split("\\|");
        final int dataStreamCount = Integer.parseInt(parts[0].trim());
        final int indexCount = Integer.parseInt(parts[1].trim());

        final Metadata.Builder metadataBuilder = Metadata.builder();
        // One slot per plain index, plus, for every data stream, its backing indices and its own name.
        final String[] expressions = new String[indexCount + dataStreamCount * (indexCount + 1)];
        int cursor = 0;

        for (int i = 1; i <= indexCount; i++) {
            final String name = INDEX_PREFIX + i;
            createIndexMetadata(name, metadataBuilder);
            expressions[cursor++] = name;
        }

        for (int ds = 1; ds <= dataStreamCount; ds++) {
            final String dataStreamName = DATA_STREAM_PREFIX + ds;
            final List<Index> backingIndices = new ArrayList<>();
            for (int generation = 1; generation <= indexCount; generation++) {
                final String backingName = DataStream.getDefaultBackingIndexName(dataStreamName, generation);
                backingIndices.add(createIndexMetadata(backingName, metadataBuilder).getIndex());
                expressions[cursor++] = backingName;
            }
            expressions[cursor++] = dataStreamName;
            metadataBuilder.put(DataStream.builder(dataStreamName, backingIndices).build());
        }

        clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadataBuilder).build();
        resolver = new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY), new SystemIndices(List.of()));

        indexListRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), expressions);
        starRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), "*");

        // Same explicit list, but with a single wildcard swapped in at the midpoint.
        final String[] withWildcard = expressions.clone();
        withWildcard[expressions.length / 2] = "my-*";
        mixedRequest = new Request(IndicesOptions.lenientExpandOpenHidden(), withWildcard);
    }

    /** Registers a one-shard, zero-replica index with the metadata builder and returns its metadata. */
    private IndexMetadata createIndexMetadata(String indexName, Metadata.Builder mb) {
        final IndexMetadata indexMetadata = IndexMetadata.builder(indexName)
            .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
            .numberOfShards(1)
            .numberOfReplicas(0)
            .build();
        mb.put(indexMetadata, false);
        return indexMetadata;
    }

    /** Resolves an explicit list naming every index and data stream. */
    @Benchmark
    public String[] resolveResourcesListToConcreteIndices() {
        return resolver.concreteIndexNames(clusterState, indexListRequest);
    }

    /** Resolves the match-everything wildcard expression. */
    @Benchmark
    public String[] resolveAllStarToConcreteIndices() {
        return resolver.concreteIndexNames(clusterState, starRequest);
    }

    /** Resolves the explicit list with one embedded wildcard. */
    @Benchmark
    public String[] resolveMixedConcreteIndices() {
        return resolver.concreteIndexNames(clusterState, mixedRequest);
    }

    /** Minimal {@link IndicesRequest} carrying only the expressions and options the resolver needs. */
    record Request(IndicesOptions indicesOptions, String... indices) implements IndicesRequest {

    }
}
6 changes: 6 additions & 0 deletions docs/changelog/114484.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
pr: 114484
summary: Add `docvalue_fields` Support for `dense_vector` Fields
area: Search
type: enhancement
issues:
- 108470
6 changes: 6 additions & 0 deletions docs/changelog/114964.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
pr: 114964
summary: Add a `monitor_stats` privilege and allow that privilege for remote cluster
privileges
area: Authorization
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/116325.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 116325
summary: Adjust analyze limit exception to be a `bad_request`
area: Analysis
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/116382.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 116382
summary: Validate missing shards after the coordinator rewrite
area: Search
type: bug
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/116478.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
pr: 116478
summary: Semantic text simple partial update
area: Search
type: bug
issues: []
Original file line number Diff line number Diff line change
Expand Up @@ -127,10 +127,11 @@ And the following may be the response:

==== Percentiles_bucket implementation

The Percentile Bucket returns the nearest input data point that is not greater than the requested percentile; it does not
interpolate between data points.

The percentiles are calculated exactly and are not an approximation (unlike the Percentiles Metric). This means
the implementation maintains an in-memory, sorted list of your data to compute the percentiles, before discarding the
data. You may run into memory pressure issues if you attempt to calculate percentiles over many millions of
data-points in a single `percentiles_bucket`.

The Percentile Bucket returns the nearest input data point to the requested percentile, rounding indices toward
positive infinity; it does not interpolate between data points. For example, if there are eight data points and
you request the `50th` percentile, it will return the `4th` item because `ROUND_UP(.50 * (8-1))` is `4`.
39 changes: 17 additions & 22 deletions docs/reference/esql/esql-kibana.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@ You can use {esql} in {kib} to query and aggregate your data, create
visualizations, and set up alerts.

This guide shows you how to use {esql} in Kibana. To follow along with the
queries, load the "Sample web logs" sample data set by clicking *Try sample
data* from the {kib} Home, selecting *Other sample data sets*, and clicking *Add
data* on the *Sample web logs* card.
queries, load the "Sample web logs" sample data set by selecting **Sample Data**
from the **Integrations** page in {kib}, selecting *Other sample data sets*,
and clicking *Add data* on the *Sample web logs* card.

[discrete]
[[esql-kibana-enable]]
Expand All @@ -30,9 +30,7 @@ However, users will be able to access existing {esql} artifacts like saved searc

// tag::esql-mode[]
To get started with {esql} in Discover, open the main menu and select
*Discover*. Next, from the Data views menu, select *Language: ES|QL*.

image::images/esql/esql-data-view-menu.png[align="center",width=33%]
*Discover*. Next, select *Try ES|QL* from the application menu bar.
// end::esql-mode[]

[discrete]
Expand All @@ -54,8 +52,9 @@ A source command can be followed by one or more <<esql-commands,processing
commands>>. In this query, the processing command is <<esql-limit>>. `LIMIT`
limits the number of rows that are retrieved.

TIP: Click the help icon (image:images/esql/esql-icon-help.svg[Static,20]) to open the
in-product reference documentation for all commands and functions.
TIP: Click the **ES|QL help** button to open the
in-product reference documentation for all commands and functions or to get
recommended queries that will help you get started.

// tag::autocomplete[]
To make it easier to write queries, auto-complete offers suggestions with
Expand All @@ -76,7 +75,7 @@ FROM kibana_sample_data_logs | LIMIT 10
====

[discrete]
==== Expand the query bar
==== Make your query readable

For readability, you can put each processing command on a new line. The
following query is identical to the previous one:
Expand All @@ -87,15 +86,12 @@ FROM kibana_sample_data_logs
| LIMIT 10
----

// tag::compact[]
To make it easier to write multi-line queries, click the double-headed arrow
button (image:images/esql/esql-icon-expand-query-bar.svg[]) to expand the query
bar:
You can do that using the **Add line breaks on pipes** button from the query editor's footer.

image::images/esql/esql-expanded-query-bar.png[align="center"]
image::https://images.contentstack.io/v3/assets/bltefdd0b53724fa2ce/bltd5554518309e10f6/672d153cfeb8f9d479ebcc6e/esql-line-breakdown.gif[Automatic line breaks for ES|QL queries]

To return to a compact query bar, click the minimize editor button
(image:images/esql/esql-icon-minimize-query-bar.svg[]).
// tag::compact[]
You can adjust the editor's height by dragging its bottom border to your liking.
// end::compact[]

[discrete]
Expand All @@ -110,9 +106,7 @@ detailed warning, expand the query bar, and click *warnings*.
==== Query history

You can reuse your recent {esql} queries in the query bar.
In the query bar click *Show recent queries*:

image::images/esql/esql-discover-show-recent-query.png[align="center",size="50%"]
In the query bar click *Show recent queries*.

You can then scroll through your recent queries:

Expand Down Expand Up @@ -220,8 +214,9 @@ FROM kibana_sample_data_logs
=== Analyze and visualize data

Between the query bar and the results table, Discover shows a date histogram
visualization. If the indices you're querying do not contain a `@timestamp`
field, the histogram is not shown.
visualization. By default, if the indices you're querying do not contain a `@timestamp`
field, the histogram is not shown. But you can use a custom time field with the `?_tstart`
and `?_tend` parameters to enable it.

The visualization adapts to the query. A query's nature determines the type of
visualization. For example, this query aggregates the total number of bytes per
Expand Down Expand Up @@ -250,7 +245,7 @@ save button (image:images/esql/esql-icon-save-visualization.svg[]). Once saved
to a dashboard, you'll be taken to the Dashboards page. You can continue to
make changes to the visualization. Click the
options button in the top-right (image:images/esql/esql-icon-options.svg[]) and
select *Edit ESQL visualization* to open the in-line editor:
select *Edit ES|QL visualization* to open the in-line editor:

image::images/esql/esql-kibana-edit-on-dashboard.png[align="center",width=66%]

Expand Down
16 changes: 7 additions & 9 deletions docs/reference/how-to/knn-search.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -72,15 +72,13 @@ least enough RAM to hold the vector data and index structures. To check the
size of the vector data, you can use the <<indices-disk-usage>> API.

Here are estimates for different element types and quantization levels:
+
--
`element_type: float`: `num_vectors * num_dimensions * 4`
`element_type: float` with `quantization: int8`: `num_vectors * (num_dimensions + 4)`
`element_type: float` with `quantization: int4`: `num_vectors * (num_dimensions/2 + 4)`
`element_type: float` with `quantization: bbq`: `num_vectors * (num_dimensions/8 + 12)`
`element_type: byte`: `num_vectors * num_dimensions`
`element_type: bit`: `num_vectors * (num_dimensions/8)`
--

* `element_type: float`: `num_vectors * num_dimensions * 4`
* `element_type: float` with `quantization: int8`: `num_vectors * (num_dimensions + 4)`
* `element_type: float` with `quantization: int4`: `num_vectors * (num_dimensions/2 + 4)`
* `element_type: float` with `quantization: bbq`: `num_vectors * (num_dimensions/8 + 12)`
* `element_type: byte`: `num_vectors * num_dimensions`
* `element_type: bit`: `num_vectors * (num_dimensions/8)`

If utilizing HNSW, the graph must also be in memory, to estimate the required bytes use `num_vectors * 4 * HNSW.m`. The
default value for `HNSW.m` is 16, so by default `num_vectors * 4 * 16`.
Expand Down
Binary file modified docs/reference/images/esql/esql-dashboard-panel.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-discover-query-history.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file not shown.
Binary file modified docs/reference/images/esql/esql-kibana-auto-complete.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-kibana-bar-chart.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-kibana-create-rule.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-kibana-edit-on-dashboard.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-kibana-enrich-autocomplete.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified docs/reference/images/esql/esql-kibana-in-line-editor.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Original file line number Diff line number Diff line change
Expand Up @@ -327,7 +327,7 @@ The result would then have the `errors` field set to `true` and hold the error f
"details": {
"my_admin_role": { <4>
"type": "action_request_validation_exception",
"reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;"
"reason": "Validation Failed: 1: unknown cluster privilege [bad_cluster_privilege]. a privilege must be either one of the predefined cluster privilege names [manage_own_api_key,manage_data_stream_global_retention,monitor_data_stream_global_retention,none,cancel_task,cross_cluster_replication,cross_cluster_search,delegate_pki,grant_api_key,manage_autoscaling,manage_index_templates,manage_logstash_pipelines,manage_oidc,manage_saml,manage_search_application,manage_search_query_rules,manage_search_synonyms,manage_service_account,manage_token,manage_user_profile,monitor_connector,monitor_enrich,monitor_inference,monitor_ml,monitor_rollup,monitor_snapshot,monitor_stats,monitor_text_structure,monitor_watcher,post_behavioral_analytics_event,read_ccr,read_connector_secrets,read_fleet_secrets,read_ilm,read_pipeline,read_security,read_slm,transport_client,write_connector_secrets,write_fleet_secrets,create_snapshot,manage_behavioral_analytics,manage_ccr,manage_connector,manage_enrich,manage_ilm,manage_inference,manage_ml,manage_rollup,manage_slm,manage_watcher,monitor_data_frame_transforms,monitor_transform,manage_api_key,manage_ingest_pipelines,manage_pipeline,manage_data_frame_transforms,manage_transform,manage_security,monitor,manage,all] or a pattern over one of the available cluster actions;"
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,7 @@ A successful call returns an object with "cluster", "index", and "remote_cluster
"monitor_ml",
"monitor_rollup",
"monitor_snapshot",
"monitor_stats",
"monitor_text_structure",
"monitor_transform",
"monitor_watcher",
Expand Down Expand Up @@ -152,7 +153,8 @@ A successful call returns an object with "cluster", "index", and "remote_cluster
"write"
],
"remote_cluster" : [
"monitor_enrich"
"monitor_enrich",
"monitor_stats"
]
}
--------------------------------------------------
Loading

0 comments on commit e85da97

Please sign in to comment.