From a282d399225247226d5abc3e3cd903462c973877 Mon Sep 17 00:00:00 2001 From: Heemin Kim Date: Wed, 19 Oct 2022 18:26:35 +0000 Subject: [PATCH 1/5] Support of GeoJson Point for GeoPoint field (#4597) * Support of GeoJson Point for GeoPoint field See https://github.com/opensearch-project/geospatial/issues/152 Signed-off-by: Heemin Kim * Refactored code based on comments Signed-off-by: Heemin Kim * Add unit tests for GeoJson support Signed-off-by: Heemin Kim * Resolve comments * Remove negation expression * Ignore case for GeoJson Point type to be consistent with geo_shape parsing Signed-off-by: Heemin Kim * Added yaml test for geopoint Signed-off-by: Heemin Kim Signed-off-by: Heemin Kim --- CHANGELOG.md | 1 + .../rest-api-spec/test/index/80_geo_point.yml | 145 +++++++++ .../org/opensearch/common/geo/GeoPoint.java | 6 +- .../org/opensearch/common/geo/GeoUtils.java | 280 ++++++++++++------ .../AbstractPointGeometryFieldMapper.java | 73 +++-- .../mapper/GeoPointFieldMapperTests.java | 53 ++++ .../search/geo/GeoPointParsingTests.java | 17 +- .../index/search/geo/GeoUtilsTests.java | 80 ++++- 8 files changed, 523 insertions(+), 132 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index d294eb68ee7c9..b0fa8716327ea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,6 +34,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added in-flight cancellation of SearchShardTask based on resource consumption ([#4565](https://github.com/opensearch-project/OpenSearch/pull/4565)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) - Add groupId value propagation tests for ZIP publication task ([#4772](https://github.com/opensearch-project/OpenSearch/pull/4772)) +- Add support for GeoJson Point type in GeoPoint field ([#4597](https://github.com/opensearch-project/OpenSearch/pull/4597)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml new file mode 100644 index 0000000000000..d15529af0041a --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/80_geo_point.yml @@ -0,0 +1,145 @@ +setup: + - do: + indices.create: + index: test_1 + body: + settings: + number_of_replicas: 0 + mappings: + properties: + location: + type: geo_point + +--- +"Single point test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + lon: 52.374081 + lat: 4.912350 + - index: + _index: test_1 + _id: 2 + - location: "4.901618,52.369219" + - index: + _index: test_1 + _id: 3 + - location: [ 52.371667, 4.914722 ] + - index: + _index: test_1 + _id: 4 + - location: "POINT (52.371667 4.914722)" + - index: + _index: test_1 + _id: 5 + - location: "t0v5zsq1gpzf" + - index: + _index: test_1 + _id: 6 + - location: + type: Point + coordinates: [ 52.371667, 4.914722 ] + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 0 } + +--- +"Multi 
points test": + - do: + bulk: + refresh: true + body: + - index: + _index: test_1 + _id: 1 + - location: + - {lon: 52.374081, lat: 4.912350} + - {lon: 152.374081, lat: 14.912350} + - index: + _index: test_1 + _id: 2 + - location: + - "4.901618,52.369219" + - "14.901618,152.369219" + - index: + _index: test_1 + _id: 3 + - location: + - [ 52.371667, 4.914722 ] + - [ 152.371667, 14.914722 ] + - index: + _index: test_1 + _id: 4 + - location: + - "POINT (52.371667 4.914722)" + - "POINT (152.371667 14.914722)" + - index: + _index: test_1 + _id: 5 + - location: + - "t0v5zsq1gpzf" + - "x6skg0zbhnum" + - index: + _index: test_1 + _id: 6 + - location: + - {type: Point, coordinates: [ 52.371667, 4.914722 ]} + - {type: Point, coordinates: [ 152.371667, 14.914722 ]} + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 51, 5 ], [ 53, 3 ] ] + + - match: { hits.total: 6 } + + - do: + search: + index: test_1 + rest_total_hits_as_int: true + body: + query: + geo_shape: + location: + shape: + type: "envelope" + coordinates: [ [ 151, 15 ], [ 153, 13 ] ] + + - match: { hits.total: 6 } diff --git a/server/src/main/java/org/opensearch/common/geo/GeoPoint.java b/server/src/main/java/org/opensearch/common/geo/GeoPoint.java index a2b06dccded8c..c59a9046e0318 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoPoint.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoPoint.java @@ -119,7 +119,11 @@ public GeoPoint resetFromString(String value, final boolean ignoreZValue, Effect public GeoPoint resetFromCoordinates(String value, final boolean ignoreZValue) { String[] vals = value.split(","); if (vals.length > 3) { - throw new OpenSearchParseException("failed to parse [{}], expected 2 or 3 coordinates " + "but found: [{}]", vals.length); + throw new OpenSearchParseException( + "failed to parse [{}], expected 2 or 3 coordinates " + "but found: [{}]", + value, + vals.length + ); } final double lat; final double lon; diff --git a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java index 5534967d559d6..96be418c85179 100644 --- a/server/src/main/java/org/opensearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/opensearch/common/geo/GeoUtils.java @@ -40,10 +40,10 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentParser; -import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.common.xcontent.XContentSubParser; import org.opensearch.common.xcontent.support.MapXContentParser; import org.opensearch.common.xcontent.support.XContentMapValues; +import org.opensearch.geometry.ShapeType; import org.opensearch.index.fielddata.FieldData; import org.opensearch.index.fielddata.GeoPointValues; import org.opensearch.index.fielddata.MultiGeoPointValues; @@ -53,6 +53,7 @@ import java.io.IOException; import java.util.Collections; +import java.util.HashMap; /** * Useful geo utilities @@ -60,7 +61,8 @@ * @opensearch.internal */ public class GeoUtils { - + private static final String ERR_MSG_INVALID_TOKEN = "token [{}] not allowed"; + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], [type|coordinates], or [geohash]"; /** Maximum valid latitude in degrees. */ public static final double MAX_LAT = 90.0; /** Minimum valid latitude in degrees. 
*/ @@ -74,6 +76,8 @@ public class GeoUtils { public static final String LONGITUDE = "lon"; public static final String GEOHASH = "geohash"; + public static final String GEOJSON_TYPE = "type"; + public static final String GEOJSON_COORDS = "coordinates"; /** Earth ellipsoid major axis defined by WGS 84 in meters */ public static final double EARTH_SEMI_MAJOR_AXIS = 6378137.0; // meters (WGS 84) @@ -444,112 +448,202 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina * Parse a {@link GeoPoint} with a {@link XContentParser}. A geopoint has one of the following forms: * *
- *     <li>Object: <pre>{"lat": <latitude>, "lon": <longitude>}</pre></li>
- *     <li>String: <pre>"<latitude>,<longitude>"</pre></li>
- *     <li>Geohash: <pre>"<geohash>"</pre></li>
- *     <li>Array: <pre>[<longitude>,<latitude>]</pre></li>
+ *     <li>Object: <pre>{@code {"lat": <latitude>, "lon": <longitude>}}</pre></li>
+ *     <li>String: <pre>{@code "<latitude>,<longitude>"}</pre></li>
+ *     <li>GeoHash: <pre>{@code "<geohash>"}</pre></li>
+ *     <li>WKT: <pre>{@code "POINT (<longitude> <latitude>)"}</pre></li>
+ *     <li>Array: <pre>{@code [<longitude>, <latitude>]}</pre></li>
+ *     <li>GeoJson: <pre>{@code {"type": "Point", "coordinates": [<longitude>, <latitude>]}}</pre></li>
 * </ul>
* + * * @param parser {@link XContentParser} to parse the value from * @param point A {@link GeoPoint} that will be reset by the values parsed + * @param ignoreZValue tells to ignore z value or throw exception when there is a z value + * @param effectivePoint tells which point to use for GeoHash form * @return new {@link GeoPoint} parsed from the parse */ - public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, final boolean ignoreZValue, EffectivePoint effectivePoint) - throws IOException, OpenSearchParseException { - double lat = Double.NaN; - double lon = Double.NaN; - String geohash = null; - NumberFormatException numberFormatException = null; - - if (parser.currentToken() == Token.START_OBJECT) { - try (XContentSubParser subParser = new XContentSubParser(parser)) { - while (subParser.nextToken() != Token.END_OBJECT) { - if (subParser.currentToken() == Token.FIELD_NAME) { - String field = subParser.currentName(); - if (LATITUDE.equals(field)) { - subParser.nextToken(); - switch (subParser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lat = subParser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new OpenSearchParseException("latitude must be a number"); - } - } else if (LONGITUDE.equals(field)) { - subParser.nextToken(); - switch (subParser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lon = subParser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new OpenSearchParseException("longitude must be a number"); - } - } else if (GEOHASH.equals(field)) { - if (subParser.nextToken() == Token.VALUE_STRING) { - geohash = subParser.text(); - } else { - throw new OpenSearchParseException("geohash must be a string"); - } - } else { - throw new OpenSearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); - } - } else { - throw new OpenSearchParseException("token [{}] not allowed", subParser.currentToken()); + public static GeoPoint parseGeoPoint( + final XContentParser parser, + final GeoPoint point, + final boolean ignoreZValue, + final EffectivePoint effectivePoint + ) throws IOException, OpenSearchParseException { + switch (parser.currentToken()) { + case START_OBJECT: + parseGeoPointObject(parser, point, ignoreZValue, effectivePoint); + break; + case START_ARRAY: + parseGeoPointArray(parser, point, ignoreZValue); + break; + case VALUE_STRING: + String val = parser.text(); + point.resetFromString(val, ignoreZValue, effectivePoint); + break; + default: + throw new OpenSearchParseException("geo_point expected"); + } + return point; + } + + private static GeoPoint parseGeoPointObject( + final XContentParser parser, + final GeoPoint point, + final boolean ignoreZValue, + final GeoUtils.EffectivePoint effectivePoint + ) throws IOException { + try (XContentSubParser subParser = new XContentSubParser(parser)) { + if (subParser.nextToken() != XContentParser.Token.FIELD_NAME) { + throw new OpenSearchParseException(ERR_MSG_INVALID_TOKEN, subParser.currentToken()); + } + + String field = subParser.currentName(); + if (LONGITUDE.equals(field) || LATITUDE.equals(field)) { + parseGeoPointObjectBasicFields(subParser, point); + } else if (GEOHASH.equals(field)) { + parseGeoHashFields(subParser, point, effectivePoint); + } else if (GEOJSON_TYPE.equals(field) || GEOJSON_COORDS.equals(field)) { + parseGeoJsonFields(subParser, point, ignoreZValue); + } else { + throw new 
OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + if (subParser.nextToken() != XContentParser.Token.END_OBJECT) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + return point; + } + } + + private static GeoPoint parseGeoPointObjectBasicFields(final XContentParser parser, final GeoPoint point) throws IOException { + HashMap data = new HashMap<>(); + for (int i = 0; i < 2; i++) { + if (i != 0) { + parser.nextToken(); + } + + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + break; + } + + String field = parser.currentName(); + if (LONGITUDE.equals(field) == false && LATITUDE.equals(field) == false) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + switch (parser.nextToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + data.put(field, parser.doubleValue(true)); + } catch (NumberFormatException e) { + throw new OpenSearchParseException("[{}] and [{}] must be valid double values", e, LONGITUDE, LATITUDE); } + break; + default: + throw new OpenSearchParseException("{} must be a number", field); + } + } + + if (data.get(LONGITUDE) == null) { + throw new OpenSearchParseException("field [{}] missing", LONGITUDE); + } + if (data.get(LATITUDE) == null) { + throw new OpenSearchParseException("field [{}] missing", LATITUDE); + } + + return point.reset(data.get(LATITUDE), data.get(LONGITUDE)); + } + + private static GeoPoint parseGeoHashFields( + final XContentParser parser, + final GeoPoint point, + final GeoUtils.EffectivePoint effectivePoint + ) throws IOException { + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + throw new OpenSearchParseException(ERR_MSG_INVALID_TOKEN, parser.currentToken()); + } + + if (GEOHASH.equals(parser.currentName()) == false) { + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); + } + + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("{} must be a string", GEOHASH); + } + + return point.parseGeoHash(parser.text(), effectivePoint); + } + + private static GeoPoint parseGeoJsonFields(final XContentParser parser, final GeoPoint point, final boolean ignoreZValue) + throws IOException { + boolean hasTypePoint = false; + boolean hasCoordinates = false; + for (int i = 0; i < 2; i++) { + if (i != 0) { + parser.nextToken(); + } + + if (parser.currentToken() != XContentParser.Token.FIELD_NAME) { + if (hasTypePoint == false) { + throw new OpenSearchParseException("field [{}] missing", GEOJSON_TYPE); + } + if (hasCoordinates == false) { + throw new OpenSearchParseException("field [{}] missing", GEOJSON_COORDS); } } - if (geohash != null) { - if (!Double.isNaN(lat) || !Double.isNaN(lon)) { - throw new OpenSearchParseException("field must be either lat/lon or geohash"); - } else { - return point.parseGeoHash(geohash, effectivePoint); + + if (GEOJSON_TYPE.equals(parser.currentName())) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new OpenSearchParseException("{} must be a string", GEOJSON_TYPE); } - } else if (numberFormatException != null) { - throw new OpenSearchParseException("[{}] and [{}] must be valid double values", numberFormatException, LATITUDE, LONGITUDE); - } else if (Double.isNaN(lat)) { - throw new OpenSearchParseException("field [{}] missing", LATITUDE); - } else if (Double.isNaN(lon)) { - throw new OpenSearchParseException("field [{}] missing", LONGITUDE); + + // To be consistent with geo_shape parsing, ignore case here as well. 
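+ // e.g. "Point", "point", and "POINT" are all accepted type values here (illustrative examples, matching the case-insensitive geo_shape behavior)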
+ if (ShapeType.POINT.name().equalsIgnoreCase(parser.text()) == false) { + throw new OpenSearchParseException("{} must be Point", GEOJSON_TYPE); + } + hasTypePoint = true; + } else if (GEOJSON_COORDS.equals(parser.currentName())) { + if (parser.nextToken() != XContentParser.Token.START_ARRAY) { + throw new OpenSearchParseException("{} must be an array", GEOJSON_COORDS); + } + parseGeoPointArray(parser, point, ignoreZValue); + hasCoordinates = true; } else { - return point.reset(lat, lon); + throw new OpenSearchParseException(ERR_MSG_INVALID_FIELDS); } + } - } else if (parser.currentToken() == Token.START_ARRAY) { - try (XContentSubParser subParser = new XContentSubParser(parser)) { - int element = 0; - while (subParser.nextToken() != Token.END_ARRAY) { - if (subParser.currentToken() == Token.VALUE_NUMBER) { - element++; - if (element == 1) { - lon = subParser.doubleValue(); - } else if (element == 2) { - lat = subParser.doubleValue(); - } else if (element == 3) { - GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); - } else { - throw new OpenSearchParseException("[geo_point] field type does not accept > 3 dimensions"); - } - } else { - throw new OpenSearchParseException("numeric value expected"); - } + return point; + } + + private static GeoPoint parseGeoPointArray(final XContentParser parser, final GeoPoint point, final boolean ignoreZValue) + throws IOException { + try (XContentSubParser subParser = new XContentSubParser(parser)) { + double x = Double.NaN; + double y = Double.NaN; + + int element = 0; + while (subParser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { + throw new OpenSearchParseException("numeric value expected"); + } + element++; + if (element == 1) { + x = parser.doubleValue(); + } else if (element == 2) { + y = parser.doubleValue(); + } else if (element == 3) { + GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + } else { + throw new OpenSearchParseException("[geo_point] field type does not accept more than 3 values"); } } - return point.reset(lat, lon); - } else if (parser.currentToken() == Token.VALUE_STRING) { - String val = parser.text(); - return point.resetFromString(val, ignoreZValue, effectivePoint); - } else { - throw new OpenSearchParseException("geo_point expected"); + + if (element < 2) { + throw new OpenSearchParseException("[geo_point] field type should have at least two dimensions"); + } + return point.reset(y, x); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java index b546eeca1ec0a..305351ca63b9a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/AbstractPointGeometryFieldMapper.java @@ -36,11 +36,14 @@ import org.opensearch.common.CheckedBiFunction; import org.opensearch.common.Explicit; import org.opensearch.common.ParseField; -import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.geo.GeometryFormat; import org.opensearch.common.geo.GeometryParser; +import org.opensearch.common.xcontent.DeprecationHandler; import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentFactory; import 
org.opensearch.common.xcontent.XContentParser; import org.opensearch.geometry.Geometry; import org.opensearch.geometry.Point; @@ -57,7 +60,7 @@ import static org.opensearch.index.mapper.TypeParsers.parseField; /** - * Base class for for spatial fields that only support indexing points + * Base class for spatial fields that only support indexing points * * @opensearch.internal */ @@ -281,32 +284,27 @@ private P process(P in) { @Override public List
<P>
parse(XContentParser parser) throws IOException, ParseException { - if (parser.currentToken() == XContentParser.Token.START_ARRAY) { - XContentParser.Token token = parser.nextToken(); - P point = pointSupplier.get(); - ArrayList
<P>
points = new ArrayList<>(); - if (token == XContentParser.Token.VALUE_NUMBER) { - double x = parser.doubleValue(); - parser.nextToken(); - double y = parser.doubleValue(); - token = parser.nextToken(); - if (token == XContentParser.Token.VALUE_NUMBER) { - GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); - } else if (token != XContentParser.Token.END_ARRAY) { - throw new OpenSearchParseException("field type does not accept > 3 dimensions"); + parser.nextToken(); + if (parser.currentToken() == XContentParser.Token.VALUE_NUMBER) { + XContentBuilder xContentBuilder = reConstructArrayXContent(parser); + try ( + XContentParser subParser = createParser( + parser.getXContentRegistry(), + parser.getDeprecationHandler(), + xContentBuilder + ); + ) { + return Collections.singletonList(process(objectParser.apply(subParser, pointSupplier.get()))); } - - point.resetCoords(x, y); - points.add(process(point)); } else { - while (token != XContentParser.Token.END_ARRAY) { - points.add(process(objectParser.apply(parser, point))); - point = pointSupplier.get(); - token = parser.nextToken(); + ArrayList
<P>
points = new ArrayList<>(); + while (parser.currentToken() != XContentParser.Token.END_ARRAY) { + points.add(process(objectParser.apply(parser, pointSupplier.get()))); + parser.nextToken(); } + return points; } - return points; } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { if (nullValue == null) { return null; @@ -318,6 +316,35 @@ public List
<P>
parse(XContentParser parser) throws IOException, ParseException { } } + private XContentParser createParser( + NamedXContentRegistry namedXContentRegistry, + DeprecationHandler deprecationHandler, + XContentBuilder xContentBuilder + ) throws IOException { + XContentParser subParser = xContentBuilder.contentType() + .xContent() + .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); + subParser.nextToken(); + return subParser; + } + + private XContentBuilder reConstructArrayXContent(XContentParser parser) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder().startArray(); + int count = 0; + while (parser.currentToken() != XContentParser.Token.END_ARRAY) { + if (++count > 3) { + break; + } + if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { + throw new OpenSearchParseException("numeric value expected"); + } + builder.value(parser.doubleValue()); + parser.nextToken(); + } + builder.endArray(); + return builder; + } + @Override public Object format(List
<P>
points, String format) { List result = new ArrayList<>(); diff --git a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java index 6a6e6b190bff3..1b4c95d9ceb8f 100644 --- a/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/GeoPointFieldMapperTests.java @@ -352,6 +352,59 @@ public void testInvalidGeopointValuesIgnored() throws Exception { ); } + public void testGeoJsonSingleValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2 }).endObject()) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + } + + public void testGeoJsonArray() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + ParsedDocument doc = mapper.parse( + source( + b -> b.startArray("field") + .startObject() + .field("type", "Point") + .array("coordinates", new double[] { 1.1, 1.2 }) + .endObject() + .startObject() + .field("type", "Point") + .array("coordinates", new double[] { 1.3, 1.4 }) + .endObject() + .endArray() + ) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + assertThat(doc.rootDoc().getFields("field"), arrayWithSize(4)); + } + + public void testGeoJsonIgnoreZValue() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", true))); + ParsedDocument doc = mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2, 1.3 }).endObject()) + ); + assertThat(doc.rootDoc().getField("field"), notNullValue()); + } + + public void testGeoJsonZValueException() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_z_value", false))); + Exception e = expectThrows( + MapperParsingException.class, + () -> mapper.parse( + source(b -> b.startObject("field").field("type", "Point").array("coordinates", new double[] { 1.1, 1.2, 1.3 }).endObject()) + ) + ); + assertThat(e.getCause().getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + } + + public void testGeoJsonIgnoreInvalidForm() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "geo_point").field("ignore_malformed", "true"))); + ParsedDocument doc = mapper.parse(source(b -> b.startObject("field").array("coordinates", new double[] { 1.1, 1.2 }).endObject())); + assertThat(doc.rootDoc().getField("field"), nullValue()); + } + @Override protected GeoPointFieldMapper.Builder newBuilder() { return new GeoPointFieldMapper.Builder("geo"); diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java index 6b937ad881f00..ea8d660b78157 100644 --- a/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java +++ b/server/src/test/java/org/opensearch/index/search/geo/GeoPointParsingTests.java @@ -50,6 +50,7 @@ import static org.hamcrest.Matchers.is; public class GeoPointParsingTests extends OpenSearchTestCase { + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], 
[type|coordinates], or [geohash]"; private static final double TOLERANCE = 1E-5; public void testGeoPointReset() throws IOException { @@ -142,12 +143,12 @@ public void testInvalidPointEmbeddedObject() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -160,12 +161,12 @@ public void testInvalidPointLatHashMix() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -179,12 +180,12 @@ public void testInvalidPointLonHashMix() throws IOException { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -197,13 +198,13 @@ public void testInvalidField() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } try (XContentParser parser2 = createParser(JsonXContent.jsonXContent, BytesReference.bytes(content))) { parser2.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(toObject(parser2), randomBoolean())); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } diff --git a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java index 
bb4b057d01414..f5e15b0342028 100644 --- a/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/opensearch/index/search/geo/GeoUtilsTests.java @@ -35,6 +35,8 @@ import org.apache.lucene.spatial.prefix.tree.Cell; import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; +import org.locationtech.spatial4j.context.SpatialContext; +import org.locationtech.spatial4j.distance.DistanceUtils; import org.opensearch.OpenSearchParseException; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.geo.GeoUtils; @@ -43,12 +45,10 @@ import org.opensearch.common.xcontent.XContentParser.Token; import org.opensearch.geometry.utils.Geohash; import org.opensearch.test.OpenSearchTestCase; -import org.locationtech.spatial4j.context.SpatialContext; -import org.locationtech.spatial4j.distance.DistanceUtils; +import org.opensearch.test.geo.RandomGeoGenerator; import java.io.IOException; -import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.closeTo; import static org.hamcrest.Matchers.containsString; @@ -57,8 +57,10 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; public class GeoUtilsTests extends OpenSearchTestCase { + private static final String ERR_MSG_INVALID_FIELDS = "field must be either [lon|lat], [type|coordinates], or [geohash]"; private static final char[] BASE_32 = { '0', '1', @@ -601,7 +603,7 @@ public void testParseGeoPointLonWrongType() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("longitude must be a number")); + assertThat(e.getMessage(), is("lon must be a number")); assertThat(parser.currentToken(), is(Token.END_OBJECT)); assertNull(parser.nextToken()); } @@ -613,7 +615,7 @@ public void testParseGeoPointLatWrongType() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("latitude must be a number")); + assertThat(e.getMessage(), is("lat must be a number")); assertThat(parser.currentToken(), is(Token.END_OBJECT)); assertNull(parser.nextToken()); } @@ -626,7 +628,7 @@ public void testParseGeoPointExtraField() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), is("field must be either [lat], [lon] or [geohash]")); + assertThat(e.getMessage(), is(ERR_MSG_INVALID_FIELDS)); } } @@ -638,7 +640,7 @@ public void testParseGeoPointLonLatGeoHash() throws IOException { try (XContentParser parser = createParser(json)) { parser.nextToken(); Exception e = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); - assertThat(e.getMessage(), containsString("field must be either lat/lon or geohash")); + assertThat(e.getMessage(), containsString(ERR_MSG_INVALID_FIELDS)); } } @@ -698,6 +700,70 @@ public void testParseGeoPointInvalidType() throws IOException { } } + public void 
testParserGeoPointGeoJson() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder json1 = jsonBuilder().startObject().field("type", "Point").array("coordinates", coordinates).endObject(); + try (XContentParser parser = createParser(json1)) { + parser.nextToken(); + GeoPoint paredPoint = GeoUtils.parseGeoPoint(parser); + assertEquals(geoPoint, paredPoint); + } + + XContentBuilder json2 = jsonBuilder().startObject().field("type", "PoInT").array("coordinates", coordinates).endObject(); + try (XContentParser parser = createParser(json2)) { + parser.nextToken(); + GeoPoint paredPoint = GeoUtils.parseGeoPoint(parser); + assertEquals(geoPoint, paredPoint); + } + } + + public void testParserGeoPointGeoJsonMissingField() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder missingType = jsonBuilder().startObject().array("coordinates", coordinates).endObject(); + expectParseException(missingType, "field [type] missing"); + + XContentBuilder missingCoordinates = jsonBuilder().startObject().field("type", "Point").endObject(); + expectParseException(missingCoordinates, "field [coordinates] missing"); + } + + public void testParserGeoPointGeoJsonUnknownField() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder unknownField = jsonBuilder().startObject() + .field("type", "Point") + .array("coordinates", coordinates) + .field("unknown", "value") + .endObject(); + expectParseException(unknownField, "field must be either [lon|lat], [type|coordinates], or [geohash]"); + } + + public void testParserGeoPointGeoJsonInvalidValue() throws IOException { + GeoPoint geoPoint = RandomGeoGenerator.randomPoint(random()); + double[] coordinates = { geoPoint.getLon(), geoPoint.getLat() }; + XContentBuilder invalidGeoJsonType = jsonBuilder().startObject() + .field("type", "invalid") + .array("coordinates", coordinates) + .endObject(); + expectParseException(invalidGeoJsonType, "type must be Point"); + + String[] coordinatesInString = { String.valueOf(geoPoint.getLon()), String.valueOf(geoPoint.getLat()) }; + XContentBuilder invalideCoordinatesType = jsonBuilder().startObject() + .field("type", "Point") + .array("coordinates", coordinatesInString) + .endObject(); + expectParseException(invalideCoordinatesType, "numeric value expected"); + } + + private void expectParseException(XContentBuilder content, String errMsg) throws IOException { + try (XContentParser parser = createParser(content)) { + parser.nextToken(); + OpenSearchParseException ex = expectThrows(OpenSearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); + assertEquals(errMsg, ex.getMessage()); + } + } + public void testPrefixTreeCellSizes() { assertThat(GeoUtils.EARTH_SEMI_MAJOR_AXIS, equalTo(DistanceUtils.EARTH_EQUATORIAL_RADIUS_KM * 1000)); assertThat(GeoUtils.quadTreeCellWidth(0), lessThanOrEqualTo(GeoUtils.EARTH_EQUATOR)); From 04eb8171b8e24222282d430e0b6514822778a77c Mon Sep 17 00:00:00 2001 From: Nishchay Malhotra <114057571+nishchay21@users.noreply.github.com> Date: Thu, 20 Oct 2022 01:04:10 +0530 Subject: [PATCH 2/5] Adding feature to exclude indexes starting with dot from shard validator (#4695) * Adding feature to exclude indexes starting with dot from shard validator Signed-off-by: Nishchay 
Malhotra --- CHANGELOG.md | 1 + .../cluster/shards/ClusterShardLimitIT.java | 306 +++++++++++++++++- .../common/settings/ClusterSettings.java | 1 + .../indices/ShardLimitValidator.java | 71 +++- .../MetadataCreateIndexServiceTests.java | 10 +- .../MetadataIndexTemplateServiceTests.java | 4 +- .../indices/ShardLimitValidatorTests.java | 229 ++++++++++++- 7 files changed, 601 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0fa8716327ea..aae68b7f715d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -163,6 +163,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. - Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) +- Added feature to ignore indexes starting with dot during shard limit validation.([#4695](https://github.com/opensearch-project/OpenSearch/pull/4695)) ### Changed ### Deprecated ### Removed diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a92849a077376..a88d42c07f8d6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -43,15 +43,28 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Priority; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.internal.io.IOUtils; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.NodeConfigurationSource; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.nio.MockNioTransportPlugin; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.function.Function; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -63,12 +76,18 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends OpenSearchIntegTestCase { private static final String shardsPerNodeKey = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + private static final String ignoreDotIndexKey = ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(); public void testSettingClusterMaxShards() { int shardsPerNode = between(1, 500_000); setShardsPerNode(shardsPerNode); } + public void testSettingIgnoreDotIndexes() { + boolean ignoreDotIndexes = randomBoolean(); + setIgnoreDotIndex(ignoreDotIndexes); + } + public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { @@ -100,7 
+119,6 @@ public void testIndexCreationOverLimit() { ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); setShardsPerNode(counts.getShardsPerNode()); - // Create an index that will bring us up to the limit createIndex( "test", @@ -127,6 +145,164 @@ public void testIndexCreationOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } + /** + * The test checks if the index starting with the dot can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would succeed. + */ + public void testIndexCreationOverLimitForDotIndexesSucceeds() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, dataNodes * dataNodes) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + createIndex( + ".test-index", + Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + clusterState = client().admin().cluster().prepareState().get().getState(); + assertTrue(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the dot should not be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to false. If the cluster.ignore_Dot_indexes is set to false index creation of + * indexes starting with dot would fail as well. + */ + public void testIndexCreationOverLimitForDotIndexesFail() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. 
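+ // Illustration with assumed numbers: with 2 data nodes, setShardsPerNode(2) yields a cluster-wide budget of 2 nodes * 2 shards = 4,
+ // and the "test" index's 2 * 2 = 4 primaries fill it exactly, which is what the assertEquals below verifies.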
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the .ds- can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would only succeed and dataStream indexes would still have validation applied. + */ + public void testIndexCreationOverLimitForDataStreamIndexes() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. 
+ int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".ds-test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".ds-test-index")); + } + public void testIndexCreationOverLimitFromTemplate() { int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); @@ -414,6 +590,100 @@ public void testOpenIndexOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } + public void testIgnoreDotSettingOnMultipleNodes() throws IOException, InterruptedException { + int maxAllowedShardsPerNode = 10, indexPrimaryShards = 11, indexReplicaShards = 1; + + InternalTestCluster cluster = new InternalTestCluster( + randomLong(), + createTempDir(), + true, + true, + 0, + 0, + "cluster", + new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(ClusterShardLimitIT.this.nodeSettings(nodeOrdinal)) + .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }, + 0, + "cluster-", + Arrays.asList( + TestSeedPlugin.class, + MockHttpTransport.TestPlugin.class, + MockTransportService.TestPlugin.class, + MockNioTransportPlugin.class, + InternalSettingsPlugin.class, + MockRepository.Plugin.class + ), + Function.identity() + ); + cluster.beforeTest(random()); + + // Starting 3 ClusterManagerOnlyNode nodes + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", true).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Starting 2 data nodes + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Setting max shards per node to be 10 + cluster.client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, maxAllowedShardsPerNode)) + .get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + + // As active ClusterManagerNode setting takes precedence killing the active one. 
+ // This would be the first one where cluster.ignore_dot_indexes is true because the above calls are blocking. + cluster.stopCurrentClusterManagerNode(); + + // Waiting for all shards to get assigned + cluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + + // Creating an index starting with dot having shards greater thn the desired node limit + try { + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index1") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + } catch (IllegalArgumentException e) { + ClusterHealthResponse clusterHealth = cluster.client().admin().cluster().prepareHealth().get(); + int currentActiveShards = clusterHealth.getActiveShards(); + int dataNodeCount = clusterHealth.getNumberOfDataNodes(); + int extraShardCount = indexPrimaryShards * (1 + indexReplicaShards); + verifyException(maxAllowedShardsPerNode * dataNodeCount, currentActiveShards, extraShardCount, e); + } + + IOUtils.close(cluster); + } + private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); @@ -457,6 +727,29 @@ private void setShardsPerNode(int shardsPerNode) { } } + private void setIgnoreDotIndex(boolean ignoreDotIndex) { + try { + ClusterUpdateSettingsResponse response; + if (frequently()) { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getPersistentSettings().getAsBoolean(ignoreDotIndexKey, true)); + } else { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getTransientSettings().getAsBoolean(ignoreDotIndexKey, true)); + } + } catch (IllegalArgumentException ex) { + fail(ex.getMessage()); + } + } + private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentException e) { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); @@ -471,4 +764,15 @@ private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentE assertEquals(expectedError, e.getMessage()); } + private void verifyException(int maxShards, int currentShards, int extraShards, IllegalArgumentException e) { + String expectedError = "Validation Failed: 1: this action would add [" + + extraShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;"; + assertEquals(expectedError, e.getMessage()); + } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 320cb5457b21c..12a3a883af347 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -255,6 +255,7 @@ public void apply(Settings value, Settings current, Settings previous) { Metadata.SETTING_READ_ONLY_SETTING, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING, ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, + ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES, 
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index a5ec6cbecaf55..e803e387448bc 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.ValidationException; @@ -64,12 +65,23 @@ public class ShardLimitValidator { Setting.Property.Dynamic, Setting.Property.NodeScope ); + + public static final Setting SETTING_CLUSTER_IGNORE_DOT_INDEXES = Setting.boolSetting( + "cluster.ignore_dot_indexes", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + protected final AtomicInteger shardLimitPerNode = new AtomicInteger(); private final SystemIndices systemIndices; + private volatile boolean ignoreDotIndexes; public ShardLimitValidator(final Settings settings, ClusterService clusterService, SystemIndices systemIndices) { this.shardLimitPerNode.set(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(settings)); + this.ignoreDotIndexes = SETTING_CLUSTER_IGNORE_DOT_INDEXES.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_NODE, this::setShardLimitPerNode); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_IGNORE_DOT_INDEXES, this::setIgnoreDotIndexes); this.systemIndices = systemIndices; } @@ -85,8 +97,15 @@ public int getShardLimitPerNode() { return shardLimitPerNode.get(); } + private void setIgnoreDotIndexes(boolean newValue) { + this.ignoreDotIndexes = newValue; + } + /** * Checks whether an index can be created without going over the cluster shard limit. + * Validate shard limit only for non system indices as it is not hard limit anyways. + * Further also validates if the cluster.ignore_dot_indexes is set to true. + * If so then it does not validate any index which starts with '.' except data-stream index. * * @param indexName the name of the index being created * @param settings the settings of the index to be created @@ -94,8 +113,12 @@ public int getShardLimitPerNode() { * @throws ValidationException if creating this index would put the cluster over the cluster shard limit */ public void validateShardLimit(final String indexName, final Settings settings, final ClusterState state) { - // Validate shard limit only for non system indices as it is not hard limit anyways - if (systemIndices.validateSystemIndex(indexName)) { + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.'. + */ + if (shouldIndexBeIgnored(indexName)) { return; } @@ -113,7 +136,9 @@ public void validateShardLimit(final String indexName, final Settings settings, /** * Validates whether a list of indices can be opened without going over the cluster shard limit. Only counts indices which are - * currently closed and will be opened, ignores indices which are already open. 
+ * currently closed and will be opened, ignores indices which are already open. Adding to this it validates the + * shard limit only for non system indices and if the cluster.ignore_dot_indexes property is set to true + * then the indexes starting with '.' are ignored except the data-stream indexes. * * @param currentState The current cluster state. * @param indicesToOpen The indices which are to be opened. @@ -121,8 +146,13 @@ public void validateShardLimit(final String indexName, final Settings settings, */ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) { int shardsToOpen = Arrays.stream(indicesToOpen) - // Validate shard limit only for non system indices as it is not hard limit anyways - .filter(index -> !systemIndices.validateSystemIndex(index.getName())) + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.' + however data-stream indexes are still validated. + */ + .filter(index -> !shouldIndexBeIgnored(index.getName())) .filter(index -> currentState.metadata().index(index).getState().equals(IndexMetadata.State.CLOSE)) .mapToInt(index -> getTotalShardCount(currentState, index)) .sum(); @@ -140,6 +170,37 @@ private static int getTotalShardCount(ClusterState state, Index index) { return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); } + /** + * Returns true if the index should be ignored during validation. + * Index is ignored if it is a system index or if cluster.ignore_dot_indexes is set to true + * then indexes which are starting with dot and are not data stream index are ignored. + * + * @param indexName The index which needs to be validated. + */ + private boolean shouldIndexBeIgnored(String indexName) { + if (this.ignoreDotIndexes) { + return validateDotIndex(indexName) && !isDataStreamIndex(indexName); + } else return systemIndices.validateSystemIndex(indexName); + } + + /** + * Returns true if the index name starts with '.' else false. + * + * @param indexName The index which needs to be validated. + */ + private boolean validateDotIndex(String indexName) { + return indexName.charAt(0) == '.'; + } + + /** + * Returns true if the index is dataStreamIndex false otherwise. + * + * @param indexName The index which needs to be validated. + */ + private boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX); + } + /** * Checks to see if an operation can be performed without taking the cluster over the cluster-wide shard limit. * Returns an error message if appropriate, or an empty {@link Optional} otherwise. 
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index ac03ab49bbbbd..5348fd0a778f7 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -592,7 +592,7 @@ public void testValidateIndexName() throws Exception { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -674,7 +674,7 @@ public void testValidateDotIndex() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -1090,7 +1090,7 @@ public void testvalidateIndexSettings() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new Environment(Settings.builder().put("path.home", "dummy").build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, @@ -1234,7 +1234,7 @@ public void testIndexLifecycleNameSetting() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new Environment(Settings.builder().put("path.home", "dummy").build(), null), new IndexScopedSettings(ilnSetting, Collections.emptySet()), threadPool, @@ -1311,7 +1311,7 @@ private static Map convertMappings(ImmutableOpenMap< } private ShardLimitValidator randomShardLimitService() { - return createTestShardLimitService(randomIntBetween(10, 10000)); + return createTestShardLimitService(randomIntBetween(10, 10000), false); } private void withTemporaryClusterService(BiConsumer consumer) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 887d8469bd01c..aa1c500030bd6 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -2100,7 +2100,7 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, @@ -2163,7 +2163,7 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { indicesService, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index 60c467e3864e9..f19689565dd92 100644 --- 
a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -59,6 +59,7 @@ import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -113,7 +114,7 @@ public void testUnderShardLimit() { * even though it exceeds the cluster max shard limit */ public void testSystemIndexCreationSucceeds() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -128,7 +129,7 @@ public void testSystemIndexCreationSucceeds() { * fails when it exceeds the cluster max shard limit */ public void testNonSystemIndexCreationFails() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -151,6 +152,92 @@ public void testNonSystemIndexCreationFails() { ); } + /** + * This test validates that creation of an index whose name starts with a dot succeeds + * when the setting cluster.ignore_dot_indexes is set to true. + */ + public void testDotIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".test-index", settings, state); + } + + /** + * This test validates that creation of an index whose name starts with a dot fails + * when the setting cluster.ignore_dot_indexes is set to false. + */ + public void testDotIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that creation of a data-stream backing index fails + * when cluster.ignore_dot_indexes is set to true and the max-shards-per-node limit is reached.
+ */ + public void testDataStreamIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".ds-test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that creation of a data-stream backing index succeeds + * when cluster.ignore_dot_indexes is set to true and the max-shards-per-node limit is not reached. + */ + public void testDataStreamIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(4, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".ds-test-index", settings, state); + } + /** * This test validates that non-system index opening * fails when it exceeds the cluster max shard limit @@ -174,7 +261,7 @@ public void testNonSystemIndexOpeningFails() { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); int maxShards = counts.getShardsPerNode() * nodesInCluster; - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); ValidationException exception = expectThrows( ValidationException.class, () -> shardLimitValidator.validateShardLimit(state, indices) @@ -214,10 +301,124 @@ public void testSystemIndexOpeningSucceeds() { .toArray(new Index[2]); // Shard limit validation succeeds without any issues as system index is being opened - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + shardLimitValidator.validateShardLimit(state, indices); + } + + /** + * This test validates that opening an index whose name starts with '.' + * succeeds even when it exceeds the cluster max shard limit, provided the + * cluster.ignore_dot_indexes setting is set to true.
+ */ + public void testDotIndexOpeningSucceeds() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + // Shard limit validation succeeds without any issues + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); shardLimitValidator.validateShardLimit(state, indices); } + /** + * This test validates that opening an index whose name starts with '.' + * fails when it exceeds the cluster max shard limit and the + * cluster.ignore_dot_indexes setting is set to false. + */ + public void testDotIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that opening an index whose name starts with '.ds-' + * fails when it exceeds the cluster max shard limit, even when + * cluster.ignore_dot_indexes is set to true.
+ */ + public void testDataStreamIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".ds-test-index", // Adding closed data stream index to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int shardsInIndex, int replicas) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); for (int i = 0; i < nodesInCluster; i++) { @@ -292,12 +493,16 @@ public static ClusterState createClusterForShardLimitTest( * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. * * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode) { + public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, boolean ignoreDotIndexes) { // Use a mocked clusterService - for unit tests we won't be updating the setting anyway. ClusterService clusterService = mock(ClusterService.class); - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(limitOnlySettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); @@ -309,11 +514,19 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo * Creates a {@link ShardLimitValidator} for testing with the given setting and a given cluster service. 
* * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @param clusterService the cluster service to use * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, ClusterService clusterService) { - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + public static ShardLimitValidator createTestShardLimitService( + int maxShardsPerNode, + boolean ignoreDotIndexes, + ClusterService clusterService + ) { + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); } From 3af46aeceed6b710301bac3318b2f4554d6f11e4 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 19 Oct 2022 19:30:04 -0400 Subject: [PATCH 3/5] Addressing 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ (#4827) * Addressing 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ Signed-off-by: Andriy Redko * Addressing code review comments Signed-off-by: Andriy Redko Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + .../opensearch/client/RestClientBuilder.java | 14 +++++++++++++- .../documentation/RestClientDocumentation.java | 18 ++++++++++++++++++ .../index/reindex/ReindexSslConfig.java | 11 +++++++++++ .../test/rest/OpenSearchRestTestCase.java | 15 ++++++++++++++- 5 files changed, 57 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index aae68b7f715d7..9f7e9b02eb764 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -149,6 +149,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix decommission status update to non leader nodes ([4800](https://github.com/opensearch-project/OpenSearch/pull/4800)) - Fix recovery path for searchable snapshots ([4813](https://github.com/opensearch-project/OpenSearch/pull/4813)) - Fix bug in AwarenessAttributeDecommissionIT([4822](https://github.com/opensearch-project/OpenSearch/pull/4822)) +- Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) ### Security - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 679a7ccb17d49..a01cf2f403099 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,8 +32,10 @@ package org.opensearch.client; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.util.Timeout; import org.apache.hc.client5.http.async.HttpAsyncClient; import org.apache.hc.client5.http.auth.CredentialsProvider; @@ -48,6 +50,7 @@ import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; import java.security.AccessController; 
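The imports just added exist for a single three-line workaround that this patch repeats in every affected file. Pulled out as a standalone sketch for readability (the class and method names are illustrative, not part of the patch):

import javax.net.ssl.SSLEngine;

import org.apache.hc.core5.function.Factory;
import org.apache.hc.core5.reactor.ssl.TlsDetails;

final class Jdk16TlsDetailsFactory {

    // Works around HTTPCLIENT-2219: without an explicit factory the async client
    // cannot determine the negotiated TLS details on JDK 16+, which surfaces as
    // "org.apache.hc.core5.http.ParseException: Invalid protocol version".
    // The factory reads the details straight from the SSLEngine instead.
    static Factory<SSLEngine, TlsDetails> create() {
        return sslEngine -> new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol());
    }
}

Each call site below passes such a factory to ClientTlsStrategyBuilder.setTlsDetailsFactory(...), as the diffs show.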
import java.security.NoSuchAlgorithmException; @@ -311,7 +314,16 @@ private CloseableHttpAsyncClient createHttpClient() { } try { - final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(SSLContext.getDefault()).build(); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(SSLContext.getDefault()) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index f4c1c98dd4ce9..b2807d35d230e 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -40,6 +40,7 @@ import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.ContentType; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpEntity; @@ -51,6 +52,7 @@ import org.apache.hc.core5.http.message.RequestLine; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.ssl.SSLContextBuilder; import org.apache.hc.core5.ssl.SSLContexts; import org.apache.hc.core5.util.Timeout; @@ -67,6 +69,8 @@ import org.opensearch.client.RestClientBuilder.HttpClientConfigCallback; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.InputStream; import java.nio.charset.StandardCharsets; @@ -429,6 +433,13 @@ public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() .setSslContext(sslContext) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() @@ -463,6 +474,13 @@ public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() .setSslContext(sslContext) + // See please https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() diff --git 
a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index f8e9018bce6df..0e0e387b78e38 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -35,7 +35,9 @@ import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -50,6 +52,8 @@ import javax.net.ssl.HostnameVerifier; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.IOException; import java.io.UncheckedIOException; import java.nio.file.Path; @@ -178,6 +182,13 @@ TlsStrategy getStrategy() { .setHostnameVerifier(hostnameVerifier) .setCiphers(cipherSuites) .setTlsVersions(protocols) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) .build(); } diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index 348ea0a924b70..0b384f07cc7ea 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -37,12 +37,14 @@ import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.function.Factory; import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.HttpStatus; import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.hc.core5.http.message.BasicHeader; import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.ssl.TlsDetails; import org.apache.hc.core5.ssl.SSLContexts; import org.apache.hc.core5.util.Timeout; import org.apache.lucene.util.SetOnce; @@ -85,6 +87,8 @@ import org.junit.Before; import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; + import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; @@ -851,7 +855,16 @@ protected static void configureClient(RestClientBuilder builder, Settings settin } final SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); builder.setHttpClientConfigCallback(httpClientBuilder -> { - final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslcontext).build(); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslcontext) + // See https://issues.apache.org/jira/browse/HTTPCLIENT-2219 + .setTlsDetailsFactory(new Factory() { + @Override + public TlsDetails create(final SSLEngine sslEngine) { + return new 
TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()); + } + }) + .build(); final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setTlsStrategy(tlsStrategy) From 334473872ae33de53958c35c50849ed2d2c1bee8 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Thu, 20 Oct 2022 19:07:14 +0530 Subject: [PATCH 4/5] Fail weight update when decommission ongoing and fail decommission when attribute not weighed away (#4839) * Add checks for decommission before setting weights Signed-off-by: Rishab Nahata --- CHANGELOG.md | 1 + .../AwarenessAttributeDecommissionIT.java | 69 +++++++++++++++ .../decommission/DecommissionService.java | 28 ++++++ .../routing/WeightedRoutingService.java | 15 ++++ .../DecommissionServiceTests.java | 64 ++++++++++++++ .../routing/WeightedRoutingServiceTests.java | 86 +++++++++++++++++++ 6 files changed, 263 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f7e9b02eb764..1e5d7a3b57862 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Refactored BalancedAllocator.Balancer to LocalShardsBalancer ([#4761](https://github.com/opensearch-project/OpenSearch/pull/4761)) +- Fail weight update when decommission ongoing and fail decommission when attribute not weighed away ([#4839](https://github.com/opensearch-project/OpenSearch/pull/4839)) ### Deprecated ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index a2270d63ba6fa..2dc964e3e8845 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -20,11 +20,15 @@ import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.DecommissioningFailedException; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; @@ -37,6 +41,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import 
java.util.concurrent.ExecutionException; import static org.opensearch.test.NodeRoles.onlyRole; @@ -102,6 +107,17 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx ensureStableCluster(6); + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + logger.info("--> starting decommissioning nodes in zone {}", 'c'); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); @@ -162,4 +178,57 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx // as by then all nodes should have joined the cluster ensureStableCluster(6, TimeValue.timeValueMinutes(2)); } + + public void testDecommissionFailedWhenAttributeNotWeighedAway() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + // Start 3 cluster manager eligible nodes + internalCluster().startClusterManagerOnlyNodes(3, Settings.builder().put(commonSettings).build()); + // start 3 data nodes + internalCluster().startDataOnlyNodes(3, Settings.builder().put(commonSettings).build()); + ensureStableCluster(6); + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(6)) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue( + ex.getMessage() + .contains("no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action") + ); + }); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 1.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + assertBusy(() -> { + DecommissioningFailedException ex = expectThrows( + DecommissioningFailedException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() + ); + assertTrue(ex.getMessage().contains("weight for decommissioned attribute is expected to be [0.0] but found [1.0]")); + }); + } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index b2c8bfbc0cdc8..e6639ae058066 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -20,7 +20,9 @@ import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -129,6 +131,8 @@ public ClusterState execute(ClusterState currentState) { DecommissionAttributeMetadata decommissionAttributeMetadata = currentState.metadata().decommissionAttributeMetadata(); // check that request is eligible to proceed ensureEligibleRequest(decommissionAttributeMetadata, decommissionAttribute); + // ensure attribute is weighed away + ensureToBeDecommissionedAttributeWeighedAway(currentState, decommissionAttribute); decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute); logger.info("registering decommission metadata [{}] to execute action", decommissionAttributeMetadata.toString()); return ClusterState.builder(currentState) @@ -413,6 +417,30 @@ private static void validateAwarenessAttribute( } } + private static void ensureToBeDecommissionedAttributeWeighedAway(ClusterState state, DecommissionAttribute decommissionAttribute) { + WeightedRoutingMetadata weightedRoutingMetadata = state.metadata().weightedRoutingMetadata(); + if (weightedRoutingMetadata == null) { + throw new DecommissioningFailedException( + decommissionAttribute, + "no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action" + ); + } + WeightedRouting weightedRouting = weightedRoutingMetadata.getWeightedRouting(); + if (weightedRouting.attributeName().equals(decommissionAttribute.attributeName()) == false) { + throw new DecommissioningFailedException( + decommissionAttribute, + "no weights are specified to attribute [" + decommissionAttribute.attributeName() + "]" + ); + } + Double attributeValueWeight = weightedRouting.weights().get(decommissionAttribute.attributeValue()); + if (attributeValueWeight == null || attributeValueWeight.equals(0.0) == false) { + throw new DecommissioningFailedException( + decommissionAttribute, + "weight for decommissioned attribute is expected to be [0.0] but found [" + attributeValueWeight + "]" + ); + } + } + private static void ensureEligibleRequest( DecommissionAttributeMetadata decommissionAttributeMetadata, DecommissionAttribute requestedDecommissionAttribute diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 54f2c81aea384..6acb4a1e832cb 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -19,6 +19,8 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; @@ -68,6 +70,8 @@ public void registerWeightedRoutingMetadata( clusterService.submitStateUpdateTask("update_weighted_routing", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { + // verify currently no decommission action is ongoing + ensureNoOngoingDecommissionAction(currentState); Metadata metadata = currentState.metadata(); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); WeightedRoutingMetadata weightedRoutingMetadata = metadata.custom(WeightedRoutingMetadata.TYPE); @@ -154,4 +158,15 @@ public void verifyAwarenessAttribute(String attributeName) { throw validationException; } } + + public void ensureNoOngoingDecommissionAction(ClusterState state) { + DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null && decommissionAttributeMetadata.status().equals(DecommissionStatus.FAILED) == false) { + throw new IllegalStateException( + "a decommission action is ongoing with status [" + + decommissionAttributeMetadata.status().status() + + "], cannot update weight during this state" + ); + } + } } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 7dee51b7713f9..c4cf6c7cc6641 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -22,14 +22,17 @@ import org.opensearch.cluster.ClusterState; import 
org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.WeightedRouting; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; @@ -169,6 +172,56 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + public void testDecommissionNotStartedWithoutWeighingAwayAttribute_1() throws InterruptedException { + Map weights = Map.of("zone_1", 1.0, "zone_2", 1.0, "zone_3", 0.0); + setWeightedRoutingWeights(weights); + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone_1"); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DecommissionResponse decommissionResponse) { + fail("on response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof DecommissioningFailedException); + assertThat( + e.getMessage(), + Matchers.containsString("weight for decommissioned attribute is expected to be [0.0] but found [1.0]") + ); + countDownLatch.countDown(); + } + }; + decommissionService.startDecommissionAction(decommissionAttribute, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + + public void testDecommissionNotStartedWithoutWeighingAwayAttribute_2() throws InterruptedException { + final CountDownLatch countDownLatch = new CountDownLatch(1); + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone_1"); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(DecommissionResponse decommissionResponse) { + fail("on response shouldn't have been called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e instanceof DecommissioningFailedException); + assertThat( + e.getMessage(), + Matchers.containsString( + "no weights are set to the attribute. 
Please set appropriate weights before triggering decommission action" + ) + ); + countDownLatch.countDown(); + } + }; + decommissionService.startDecommissionAction(decommissionAttribute, listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } + @SuppressWarnings("unchecked") public void testDecommissioningFailedWhenAnotherAttributeDecommissioningSuccessful() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); @@ -286,6 +339,17 @@ public void onFailure(Exception e) { assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + private void setWeightedRoutingWeights(Map weights) { + ClusterState clusterState = clusterService.state(); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + WeightedRoutingMetadata weightedRoutingMetadata = new WeightedRoutingMetadata(weightedRouting); + Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); + metadataBuilder.putCustom(WeightedRoutingMetadata.TYPE, weightedRoutingMetadata); + clusterState = ClusterState.builder(clusterState).metadata(metadataBuilder).build(); + ClusterState.Builder builder = ClusterState.builder(clusterState); + ClusterServiceUtils.setState(clusterService, builder); + } + private ClusterState addDataNodes(ClusterState clusterState, String zone, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.add(newDataNode(nodeId, singletonMap("zone", zone)))); diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index fc5d46ef84c79..91b8703cacf5c 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -8,6 +8,7 @@ package org.opensearch.cluster.routing; +import org.hamcrest.MatcherAssert; import org.junit.After; import org.junit.Before; import org.opensearch.Version; @@ -21,6 +22,9 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -43,6 +47,11 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; public class WeightedRoutingServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; @@ -173,6 +182,15 @@ private ClusterState setWeightedRoutingWeights(ClusterState clusterState, Map weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); ClusterState state = clusterService.state(); @@ -276,4 +294,72 @@ public void testVerifyAwarenessAttribute_ValidAttributeName() { fail("verify awareness attribute should not fail"); } } + + public void testAddWeightedRoutingFailsWhenDecommissionOngoing() throws InterruptedException 
{ + Map<String, Double> weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); + DecommissionStatus status = randomFrom(DecommissionStatus.INIT, DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL); + ClusterState state = clusterService.state(); + state = setWeightedRoutingWeights(state, weights); + state = setDecommissionAttribute(state, status); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder( + client, + ClusterAddWeightedRoutingAction.INSTANCE + ); + WeightedRouting updatedWeightedRouting = new WeightedRouting("zone", weights); + request.setWeightedRouting(updatedWeightedRouting); + final CountDownLatch countDownLatch = new CountDownLatch(1); + final AtomicReference<Exception> exceptionReference = new AtomicReference<>(); + ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionReference.set(e); + countDownLatch.countDown(); + } + }; + weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); + MatcherAssert.assertThat(exceptionReference.get(), instanceOf(IllegalStateException.class)); + MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("a decommission action is ongoing with status")); + } + + public void testAddWeightedRoutingPassesWhenDecommissionFailed() throws InterruptedException { + Map<String, Double> weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); + DecommissionStatus status = DecommissionStatus.FAILED; + ClusterState state = clusterService.state(); + state = setWeightedRoutingWeights(state, weights); + state = setDecommissionAttribute(state, status); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder( + client, + ClusterAddWeightedRoutingAction.INSTANCE + ); + WeightedRouting updatedWeightedRouting = new WeightedRouting("zone", weights); + request.setWeightedRouting(updatedWeightedRouting); + final CountDownLatch countDownLatch = new CountDownLatch(1); + final AtomicReference<Exception> exceptionReference = new AtomicReference<>(); + ActionListener<ClusterStateUpdateResponse> listener = new ActionListener<ClusterStateUpdateResponse>() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + assertTrue(clusterStateUpdateResponse.isAcknowledged()); + assertEquals(updatedWeightedRouting, clusterService.state().metadata().weightedRoutingMetadata().getWeightedRouting()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) {} + }; + weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + } } From 515f84bf92ad06f2178d4d5269c38a78a6bf215b Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 20 Oct 2022 11:50:33 -0500 Subject: [PATCH 5/5] [Remove] Deprecated serialization logic from pipeline aggs (#4847) Removes the deprecated readFrom/writeTo serialization logic from pipeline aggs that is no longer used as of Legacy version 7.8.
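For plugin authors the migration this implies is mechanical: the aggregatorReader constructor argument simply disappears. A hedged sketch with hypothetical names (MyPipelineAggregationBuilder and its NAME/PARSER constants are assumed to exist in the plugin; the surviving constructor shapes are the ones kept in SearchPlugin.java below):

import java.util.List;

import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.SearchPlugin;

public class MyAggsPlugin extends Plugin implements SearchPlugin {

    @Override
    public List<PipelineAggregationSpec> getPipelineAggregations() {
        return List.of(
            // Before: new PipelineAggregationSpec(NAME, MyPipelineAggregationBuilder::new,
            //     MyPipelineAggregator::new, PARSER); the third (aggregatorReader) argument is removed.
            new PipelineAggregationSpec(
                MyPipelineAggregationBuilder.NAME,
                MyPipelineAggregationBuilder::new,
                MyPipelineAggregationBuilder.PARSER
            )
        );
    }
}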
Signed-off-by: Nicholas Walter Knize --- CHANGELOG.md | 1 + .../org/opensearch/plugins/SearchPlugin.java | 74 +----------- .../org/opensearch/search/SearchModule.java | 36 ------ .../search/aggregations/AggregationPhase.java | 3 +- .../aggregations/InternalAggregation.java | 40 ------- .../aggregations/InternalAggregations.java | 107 +----------------- .../pipeline/AvgBucketPipelineAggregator.java | 14 --- .../BucketMetricsPipelineAggregator.java | 21 ---- .../BucketScriptPipelineAggregator.java | 28 ----- .../BucketSelectorPipelineAggregator.java | 26 ----- .../BucketSortPipelineAggregator.java | 27 ----- .../CumulativeSumPipelineAggregator.java | 21 ---- .../DerivativePipelineAggregator.java | 25 ---- ...ExtendedStatsBucketPipelineAggregator.java | 21 ---- .../pipeline/MaxBucketPipelineAggregator.java | 14 --- .../pipeline/MinBucketPipelineAggregator.java | 14 --- .../pipeline/MovAvgPipelineAggregator.java | 31 ----- .../pipeline/MovFnPipelineAggregator.java | 28 ----- .../PercentilesBucketPipelineAggregator.java | 23 ---- .../pipeline/PipelineAggregator.java | 55 +-------- .../SerialDiffPipelineAggregator.java | 25 ---- .../pipeline/SiblingPipelineAggregator.java | 9 -- .../StatsBucketPipelineAggregator.java | 11 -- .../pipeline/SumBucketPipelineAggregator.java | 14 --- .../search/query/QuerySearchResult.java | 40 ++----- .../opensearch/search/SearchModuleTests.java | 24 +--- .../InternalAggregationsTests.java | 27 +---- .../terms/SignificanceHeuristicTests.java | 5 - .../test/InternalAggregationTestCase.java | 58 ---------- 29 files changed, 22 insertions(+), 800 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1e5d7a3b57862..c7b736205b87b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -102,6 +102,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) - Remove LegacyESVersion.V_7_4_ and V_7_5_ Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) - Remove Legacy Version support from Snapshot/Restore Service ([#4728](https://github.com/opensearch-project/OpenSearch/pull/4728)) +- Remove deprecated serialization logic from pipeline aggs ([#4847](https://github.com/opensearch-project/OpenSearch/pull/4847)) ### Fixed - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) diff --git a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java index af7d4fc2e9fe5..a61a73255aa61 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPlugin.java @@ -621,52 +621,6 @@ class PipelineAggregationSpec extends SearchExtensionSpec< PipelineAggregationBuilder, ContextParser> { private final Map> resultReaders = new TreeMap<>(); - /** - * Read the aggregator from a stream. - * @deprecated Pipelines implemented after 7.8.0 do not need to be sent across the wire - */ - @Deprecated - private final Writeable.Reader aggregatorReader; - - /** - * Specification of a {@link PipelineAggregator}. - * - * @param name holds the names by which this aggregation might be parsed. The {@link ParseField#getPreferredName()} is special as it - * is the name by under which the readers are registered. 
So it is the name that the {@link PipelineAggregationBuilder} and - * {@link PipelineAggregator} should return from {@link NamedWriteable#getWriteableName()}. It is an error if - * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. - * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a - * {@link StreamInput} - * @param parser reads the aggregation builder from XContent - */ - public PipelineAggregationSpec( - ParseField name, - Writeable.Reader builderReader, - ContextParser parser - ) { - super(name, builderReader, parser); - this.aggregatorReader = null; - } - - /** - * Specification of a {@link PipelineAggregator}. - * - * @param name name by which this aggregation might be parsed or deserialized. Make sure it is the name that the - * {@link PipelineAggregationBuilder} and {@link PipelineAggregator} should return from - * {@link NamedWriteable#getWriteableName()}. It is an error if this name conflicts with another registered name, including - * names from other plugins. - * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a - * {@link StreamInput} - * @param parser reads the aggregation builder from XContent - */ - public PipelineAggregationSpec( - String name, - Writeable.Reader builderReader, - ContextParser parser - ) { - super(name, builderReader, parser); - this.aggregatorReader = null; - } /** * Specification of a {@link PipelineAggregator}. @@ -677,20 +631,14 @@ public PipelineAggregationSpec( * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent - * @deprecated Use {@link PipelineAggregationSpec#PipelineAggregationSpec(ParseField, Writeable.Reader, ContextParser)} for - * pipelines implemented after 7.8.0 */ - @Deprecated public PipelineAggregationSpec( ParseField name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, ContextParser parser ) { super(name, builderReader, parser); - this.aggregatorReader = aggregatorReader; } /** @@ -702,20 +650,15 @@ public PipelineAggregationSpec( * names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent - * @deprecated Use {@link PipelineAggregationSpec#PipelineAggregationSpec(String, Writeable.Reader, ContextParser)} for pipelines - * implemented after 7.8.0 + */ - @Deprecated public PipelineAggregationSpec( String name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, ContextParser parser ) { super(name, builderReader, parser); - this.aggregatorReader = aggregatorReader; } /** @@ -727,7 +670,6 @@ public PipelineAggregationSpec( * {@link ParseField#getPreferredName()} conflicts with another registered name, including names from other plugins. * @param builderReader the reader registered for this aggregation's builder. 
Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @param parser reads the aggregation builder from XContent * @deprecated prefer the ctor that takes a {@link ContextParser} */ @@ -735,11 +677,9 @@ public PipelineAggregationSpec( public PipelineAggregationSpec( ParseField name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, PipelineAggregator.Parser parser ) { super(name, builderReader, (p, n) -> parser.parse(n, p)); - this.aggregatorReader = aggregatorReader; } /** @@ -751,18 +691,15 @@ public PipelineAggregationSpec( * names from other plugins. * @param builderReader the reader registered for this aggregation's builder. Typically, a reference to a constructor that takes a * {@link StreamInput} - * @param aggregatorReader reads the {@link PipelineAggregator} from a stream * @deprecated prefer the ctor that takes a {@link ContextParser} */ @Deprecated public PipelineAggregationSpec( String name, Writeable.Reader builderReader, - Writeable.Reader aggregatorReader, PipelineAggregator.Parser parser ) { super(name, builderReader, (p, n) -> parser.parse(n, p)); - this.aggregatorReader = aggregatorReader; } /** @@ -781,15 +718,6 @@ public PipelineAggregationSpec addResultReader(String writeableName, Writeable.R return this; } - /** - * Read the aggregator from a stream. - * @deprecated Pipelines implemented after 7.8.0 do not need to be sent across the wire - */ - @Deprecated - public Writeable.Reader getAggregatorReader() { - return aggregatorReader; - } - /** * Get the readers that must be registered for this aggregation's results. */ diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java index 0149f9a025bcd..4bf7dca9ef444 100644 --- a/server/src/main/java/org/opensearch/search/SearchModule.java +++ b/server/src/main/java/org/opensearch/search/SearchModule.java @@ -211,21 +211,14 @@ import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketScriptPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketSelectorPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketSelectorPipelineAggregator; import org.opensearch.search.aggregations.pipeline.BucketSortPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.BucketSortPipelineAggregator; import org.opensearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.CumulativeSumPipelineAggregator; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregator; import org.opensearch.search.aggregations.pipeline.EwmaModel; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketParser; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregator; 
import org.opensearch.search.aggregations.pipeline.HoltLinearModel; import org.opensearch.search.aggregations.pipeline.HoltWintersModel; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; @@ -236,24 +229,15 @@ import org.opensearch.search.aggregations.pipeline.InternalStatsBucket; import org.opensearch.search.aggregations.pipeline.LinearModel; import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MinBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MovAvgModel; import org.opensearch.search.aggregations.pipeline.MovAvgPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MovAvgPipelineAggregator; import org.opensearch.search.aggregations.pipeline.MovFnPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.MovFnPipelineAggregator; import org.opensearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.PercentilesBucketPipelineAggregator; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.aggregations.pipeline.SerialDiffPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SerialDiffPipelineAggregator; import org.opensearch.search.aggregations.pipeline.SimpleModel; import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregator; import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregator; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.FetchSubPhase; @@ -710,7 +694,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( DerivativePipelineAggregationBuilder.NAME, DerivativePipelineAggregationBuilder::new, - DerivativePipelineAggregator::new, DerivativePipelineAggregationBuilder::parse ).addResultReader(InternalDerivative::new) ); @@ -718,7 +701,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MaxBucketPipelineAggregationBuilder.NAME, MaxBucketPipelineAggregationBuilder::new, - MaxBucketPipelineAggregator::new, MaxBucketPipelineAggregationBuilder.PARSER ) // This bucket is used by many pipeline aggregations. @@ -728,7 +710,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MinBucketPipelineAggregationBuilder.NAME, MinBucketPipelineAggregationBuilder::new, - MinBucketPipelineAggregator::new, MinBucketPipelineAggregationBuilder.PARSER ) /* Uses InternalBucketMetricValue */ @@ -737,7 +718,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( AvgBucketPipelineAggregationBuilder.NAME, AvgBucketPipelineAggregationBuilder::new, - AvgBucketPipelineAggregator::new, AvgBucketPipelineAggregationBuilder.PARSER ) // This bucket is used by many pipeline aggregations.
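
Note: after this change a plugin registers a pipeline aggregation with only a builder reader and an XContent parser. A minimal sketch of what that looks like, assuming a hypothetical MyAggPlugin and MyPipelineAggregationBuilder (with a StreamInput constructor and a static PARSER); the SearchPlugin extension point and the three-argument PipelineAggregationSpec constructor are the ones shown in this diff:

    import static java.util.Collections.singletonList;

    import java.util.List;

    import org.opensearch.plugins.Plugin;
    import org.opensearch.plugins.SearchPlugin;

    // Hypothetical plugin, for illustration only.
    public class MyAggPlugin extends Plugin implements SearchPlugin {
        @Override
        public List<PipelineAggregationSpec> getPipelineAggregations() {
            return singletonList(
                new PipelineAggregationSpec(
                    MyPipelineAggregationBuilder.NAME,  // name used for parsing and NamedWriteable registration
                    MyPipelineAggregationBuilder::new,  // Writeable.Reader for the builder
                    MyPipelineAggregationBuilder.PARSER // ContextParser reading the builder from XContent
                )
                // No Writeable.Reader<PipelineAggregator> argument any more: the
                // aggregator never crosses the wire and is rebuilt from its builder
                // during reduction on the coordinating node.
            );
        }
    }

The dropped argument was the Writeable.Reader<PipelineAggregator>, which the remaining hunks below remove from every built-in registration.
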
@@ -747,7 +727,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( SumBucketPipelineAggregationBuilder.NAME, SumBucketPipelineAggregationBuilder::new, - SumBucketPipelineAggregator::new, SumBucketPipelineAggregationBuilder.PARSER ) /* Uses InternalSimpleValue */ @@ -756,7 +735,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( StatsBucketPipelineAggregationBuilder.NAME, StatsBucketPipelineAggregationBuilder::new, - StatsBucketPipelineAggregator::new, StatsBucketPipelineAggregationBuilder.PARSER ).addResultReader(InternalStatsBucket::new) ); @@ -764,7 +742,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( ExtendedStatsBucketPipelineAggregationBuilder.NAME, ExtendedStatsBucketPipelineAggregationBuilder::new, - ExtendedStatsBucketPipelineAggregator::new, new ExtendedStatsBucketParser() ).addResultReader(InternalExtendedStatsBucket::new) ); @@ -772,7 +749,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( PercentilesBucketPipelineAggregationBuilder.NAME, PercentilesBucketPipelineAggregationBuilder::new, - PercentilesBucketPipelineAggregator::new, PercentilesBucketPipelineAggregationBuilder.PARSER ).addResultReader(InternalPercentilesBucket::new) ); @@ -780,7 +756,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MovAvgPipelineAggregationBuilder.NAME, MovAvgPipelineAggregationBuilder::new, - MovAvgPipelineAggregator::new, (XContentParser parser, String name) -> MovAvgPipelineAggregationBuilder.parse( movingAverageModelParserRegistry, name, @@ -792,7 +767,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( CumulativeSumPipelineAggregationBuilder.NAME, CumulativeSumPipelineAggregationBuilder::new, - CumulativeSumPipelineAggregator::new, CumulativeSumPipelineAggregationBuilder.PARSER ) ); @@ -800,7 +774,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketScriptPipelineAggregationBuilder.NAME, BucketScriptPipelineAggregationBuilder::new, - BucketScriptPipelineAggregator::new, BucketScriptPipelineAggregationBuilder.PARSER ) ); @@ -808,7 +781,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketSelectorPipelineAggregationBuilder.NAME, BucketSelectorPipelineAggregationBuilder::new, - BucketSelectorPipelineAggregator::new, BucketSelectorPipelineAggregationBuilder::parse ) ); @@ -816,7 +788,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( BucketSortPipelineAggregationBuilder.NAME, BucketSortPipelineAggregationBuilder::new, - BucketSortPipelineAggregator::new, BucketSortPipelineAggregationBuilder::parse ) ); @@ -824,7 +795,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( SerialDiffPipelineAggregationBuilder.NAME, SerialDiffPipelineAggregationBuilder::new, - SerialDiffPipelineAggregator::new, SerialDiffPipelineAggregationBuilder::parse ) ); @@ -832,7 +802,6 @@ private void registerPipelineAggregations(List plugins) { new PipelineAggregationSpec( MovFnPipelineAggregationBuilder.NAME, MovFnPipelineAggregationBuilder::new, - MovFnPipelineAggregator::new, MovFnPipelineAggregationBuilder.PARSER ) ); @@ -847,11 +816,6 @@ private void registerPipelineAggregation(PipelineAggregationSpec spec) { namedWriteables.add( new NamedWriteableRegistry.Entry(PipelineAggregationBuilder.class, 
spec.getName().getPreferredName(), spec.getReader()) ); - if (spec.getAggregatorReader() != null) { - namedWriteables.add( - new NamedWriteableRegistry.Entry(PipelineAggregator.class, spec.getName().getPreferredName(), spec.getAggregatorReader()) - ); - } for (Map.Entry> resultReader : spec.getResultReaders().entrySet()) { namedWriteables.add( new NamedWriteableRegistry.Entry(InternalAggregation.class, resultReader.getKey(), resultReader.getValue()) diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java index c091a1bc47c05..946da6e9c3369 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregationPhase.java @@ -148,8 +148,7 @@ public void execute(SearchContext context) { throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); } } - context.queryResult() - .aggregations(new InternalAggregations(aggregations, context.request().source().aggregations()::buildPipelineTree)); + context.queryResult().aggregations(new InternalAggregations(aggregations)); // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java index 6ccd429388873..714de06fef1e0 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregation.java @@ -31,7 +31,6 @@ package org.opensearch.search.aggregations; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.io.stream.StreamInput; @@ -187,8 +186,6 @@ public void consumeBucketsAndMaybeBreak(int size) { protected final Map metadata; - private List pipelineAggregatorsForBwcSerialization; - /** * Constructs an aggregation result with a given name. * @@ -199,46 +196,18 @@ protected InternalAggregation(String name, Map metadata) { this.metadata = metadata; } - /** - * Merge a {@linkplain PipelineAggregator.PipelineTree} into this - * aggregation result tree before serializing to a node older than - * 7.8.0. - */ - public final void mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree pipelineTree) { - if (pipelineAggregatorsForBwcSerialization != null) { - /* - * This method is called once per level on the results but only - * has useful pipeline aggregations on the top level. Every level - * below the top will always be empty. So if we've already been - * called we should bail. This is pretty messy but it is the kind - * of weird thing we have to do to deal with bwc serialization.... - */ - return; - } - pipelineAggregatorsForBwcSerialization = pipelineTree.aggregators(); - forEachBucket(bucketAggs -> bucketAggs.mergePipelineTreeForBWCSerialization(pipelineTree)); - } - /** * Read from a stream. 
*/ protected InternalAggregation(StreamInput in) throws IOException { name = in.readString(); metadata = in.readMap(); - if (in.getVersion().before(LegacyESVersion.V_7_8_0)) { - in.readNamedWriteableList(PipelineAggregator.class); - } } @Override public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeGenericValue(metadata); - if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - assert pipelineAggregatorsForBwcSerialization != null - : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization"; - out.writeNamedWriteableList(pipelineAggregatorsForBwcSerialization); - } doWriteTo(out); } @@ -355,15 +324,6 @@ public Map getMetadata() { return metadata; } - /** - * The {@linkplain PipelineAggregator}s sent to older versions of OpenSearch. - * @deprecated only use these for serializing to older OpenSearch versions - */ - @Deprecated - public List pipelineAggregatorsForBwcSerialization() { - return pipelineAggregatorsForBwcSerialization; - } - @Override public String getType() { return getWriteableName(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java index 79dd8d756dede..16d7898118fc3 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/opensearch/search/aggregations/InternalAggregations.java @@ -31,14 +31,12 @@ package org.opensearch.search.aggregations; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.opensearch.search.aggregations.support.AggregationPath; @@ -50,13 +48,8 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.function.Function; -import java.util.function.Supplier; import java.util.stream.Collectors; -import static java.util.Collections.emptyList; -import static java.util.stream.Collectors.toList; - /** * An internal implementation of {@link Aggregations}. * @@ -76,29 +69,11 @@ public final class InternalAggregations extends Aggregations implements Writeabl } }; - /** - * The way to build a tree of pipeline aggregators. Used only for - * serialization backwards compatibility. - */ - private final Supplier pipelineTreeForBwcSerialization; - /** * Constructs a new aggregation. */ - private InternalAggregations(List aggregations) { - super(aggregations); - this.pipelineTreeForBwcSerialization = null; - } - - /** - * Constructs a node in the aggregation tree. - * @param pipelineTreeSource must be null inside the tree or after final reduction. Should reference the - * search request otherwise so we can properly serialize the response to - * versions of OpenSearch that require the pipelines to be serialized. 
- */ - public InternalAggregations(List aggregations, Supplier pipelineTreeSource) { + public InternalAggregations(List aggregations) { super(aggregations); - this.pipelineTreeForBwcSerialization = pipelineTreeSource; } public static InternalAggregations from(List aggregations) { @@ -110,45 +85,12 @@ public static InternalAggregations from(List aggregations) public static InternalAggregations readFrom(StreamInput in) throws IOException { final InternalAggregations res = from(in.readList(stream -> in.readNamedWriteable(InternalAggregation.class))); - if (in.getVersion().before(LegacyESVersion.V_7_8_0)) { - /* - * Setting the pipeline tree source to null is here is correct but - * only because we don't immediately pass the InternalAggregations - * off to another node. Instead, we always reduce together with - * many aggregations and that always adds the tree read from the - * current request. - */ - in.readNamedWriteableList(PipelineAggregator.class); - } return res; } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_8_0)) { - if (pipelineTreeForBwcSerialization == null) { - mergePipelineTreeForBWCSerialization(PipelineTree.EMPTY); - out.writeNamedWriteableList(getInternalAggregations()); - out.writeNamedWriteableList(emptyList()); - } else { - PipelineAggregator.PipelineTree pipelineTree = pipelineTreeForBwcSerialization.get(); - mergePipelineTreeForBWCSerialization(pipelineTree); - out.writeNamedWriteableList(getInternalAggregations()); - out.writeNamedWriteableList(pipelineTree.aggregators()); - } - } else { - out.writeNamedWriteableList(getInternalAggregations()); - } - } - - /** - * Merge a {@linkplain PipelineAggregator.PipelineTree} into this - * aggregation result tree before serializing to a node older than - * 7.8.0. - */ - public void mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree pipelineTree) { - getInternalAggregations().stream() - .forEach(agg -> { agg.mergePipelineTreeForBWCSerialization(pipelineTree.subTree(agg.getName())); }); + out.writeNamedWriteableList(getInternalAggregations()); } /** @@ -160,26 +102,6 @@ public List copyResults() { return new ArrayList<>(getInternalAggregations()); } - /** - * Get the top level pipeline aggregators. - * @deprecated these only exist for BWC serialization - */ - @Deprecated - public List getTopLevelPipelineAggregators() { - if (pipelineTreeForBwcSerialization == null) { - return emptyList(); - } - return pipelineTreeForBwcSerialization.get().aggregators().stream().map(p -> (SiblingPipelineAggregator) p).collect(toList()); - } - - /** - * Get the transient pipeline tree used to serialize pipeline aggregators to old nodes. - */ - @Deprecated - Supplier getPipelineTreeForBwcSerialization() { - return pipelineTreeForBwcSerialization; - } - @SuppressWarnings("unchecked") private List getInternalAggregations() { return (List) aggregations; @@ -208,11 +130,7 @@ public double sortValue(AggregationPath.PathElement head, Iterator aggregationsList, ReduceContext context) { - InternalAggregations reduced = reduce( - aggregationsList, - context, - reducedAggregations -> new InternalAggregations(reducedAggregations, context.pipelineTreeForBwcSerialization()) - ); + InternalAggregations reduced = reduce(aggregationsList, context); if (reduced == null) { return null; } @@ -238,16 +156,8 @@ public static InternalAggregations topLevelReduce(List agg * {@link InternalAggregations} object found in the list. 
* Note that pipeline aggregations _are not_ reduced by this method. Pipelines are handled * separately by {@link InternalAggregations#topLevelReduce(List, ReduceContext)} - * @param ctor used to build the {@link InternalAggregations}. The top level reduce specifies a constructor - * that adds pipeline aggregation information that is used to send pipeline aggregations to - * older versions of Elasticsearch that require the pipeline aggregations to be returned - * as part of the aggregation tree */ - public static InternalAggregations reduce( - List aggregationsList, - ReduceContext context, - Function, InternalAggregations> ctor - ) { + public static InternalAggregations reduce(List aggregationsList, ReduceContext context) { if (aggregationsList.isEmpty()) { return null; } @@ -280,14 +190,7 @@ public static InternalAggregations reduce( } } - return ctor.apply(reducedAggregations); - } - - /** - * Version of {@link #reduce(List, ReduceContext, Function)} for nodes inside the aggregation tree. - */ - public static InternalAggregations reduce(List aggregationsList, ReduceContext context) { - return reduce(aggregationsList, context, InternalAggregations::from); + return new InternalAggregations(reducedAggregations); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java index e9e1f13a80766..e4f0294ae0723 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/AvgBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -59,18 +57,6 @@ public class AvgBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, format, metadata); } - /** - * Read from a stream. 
- */ - public AvgBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return AvgBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { count = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java index 4836428027f08..5a2e1238e19c7 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketMetricsPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregation; import org.opensearch.search.aggregations.Aggregations; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; import org.opensearch.search.aggregations.support.AggregationPath; -import java.io.IOException; import java.util.List; import java.util.Map; @@ -70,24 +67,6 @@ public abstract class BucketMetricsPipelineAggregator extends SiblingPipelineAgg this.format = format; } - /** - * Read from a stream. - */ - BucketMetricsPipelineAggregator(StreamInput in) throws IOException { - super(in); - format = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - } - - @Override - public final void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(format); - gapPolicy.writeTo(out); - innerWriteTo(out); - } - - protected void innerWriteTo(StreamOutput out) throws IOException {} - @Override public final InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { preCollection(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java index abf98b6ea4a6f..41e11e2537845 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.BucketAggregationScript; import org.opensearch.script.Script; import org.opensearch.search.DocValueFormat; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -79,31 +76,6 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - @SuppressWarnings("unchecked") - public BucketScriptPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - bucketsPathsMap = (Map) in.readGenericValue(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeGenericValue(bucketsPathsMap); - } - - @Override - public String getWriteableName() { - return BucketScriptPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java index 8f5426eae83a4..24f64bc78815d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSelectorPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.BucketAggregationSelectorScript; import org.opensearch.script.Script; import org.opensearch.search.aggregations.InternalAggregation; @@ -41,7 +39,6 @@ import org.opensearch.search.aggregations.InternalMultiBucketAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -72,29 +69,6 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - @SuppressWarnings("unchecked") - public BucketSelectorPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - gapPolicy = GapPolicy.readFrom(in); - bucketsPathsMap = (Map) in.readGenericValue(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - gapPolicy.writeTo(out); - out.writeGenericValue(bucketsPathsMap); - } - - @Override - public String getWriteableName() { - return BucketSelectorPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java index 809514614b1f2..571fbf5ad42a6 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/BucketSortPipelineAggregator.java @@ -31,8 +31,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; import org.opensearch.search.aggregations.InternalMultiBucketAggregation; @@ -41,7 +39,6 @@ import org.opensearch.search.sort.FieldSortBuilder; import org.opensearch.search.sort.SortOrder; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -75,30 +72,6 @@ public class BucketSortPipelineAggregator extends PipelineAggregator { this.gapPolicy = gapPolicy; } - /** - * Read from a stream. 
- */ - public BucketSortPipelineAggregator(StreamInput in) throws IOException { - super(in); - sorts = in.readList(FieldSortBuilder::new); - from = in.readVInt(); - size = in.readOptionalVInt(); - gapPolicy = GapPolicy.readFrom(in); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeList(sorts); - out.writeVInt(from); - out.writeOptionalVInt(size); - gapPolicy.writeTo(out); - } - - @Override - public String getWriteableName() { - return BucketSortPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation originalAgg = diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java index cba35c89cc639..c11ed9d7ded7d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -65,24 +62,6 @@ public class CumulativeSumPipelineAggregator extends PipelineAggregator { this.formatter = formatter; } - /** - * Read from a stream. 
- */ - public CumulativeSumPipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - } - - @Override - public String getWriteableName() { - return CumulativeSumPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java index 3603d1c5a0c58..4bdd2c3168fae 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/DerivativePipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -43,7 +41,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -76,28 +73,6 @@ public class DerivativePipelineAggregator extends PipelineAggregator { this.xAxisUnits = xAxisUnits == null ? null : (double) xAxisUnits; } - /** - * Read from a stream. 
- */ - public DerivativePipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - xAxisUnits = in.readOptionalDouble(); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeOptionalDouble(xAxisUnits); - } - - @Override - public String getWriteableName() { - return DerivativePipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java index a63a1da332d08..81f4a441e3772 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregator.java @@ -32,13 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -66,24 +63,6 @@ public class ExtendedStatsBucketPipelineAggregator extends BucketMetricsPipeline this.sigma = sigma; } - /** - * Read from a stream. - */ - public ExtendedStatsBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - sigma = in.readDouble(); - } - - @Override - protected void innerWriteTo(StreamOutput out) throws IOException { - out.writeDouble(sigma); - } - - @Override - public String getWriteableName() { - return ExtendedStatsBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java index 1e327919755c7..7e63af31a9c86 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MaxBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -61,18 +59,6 @@ public class MaxBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. 
- */ - public MaxBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return MaxBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { maxBucketKeys = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java index 2b57aac14ea1e..773fc7441d96e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MinBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -61,18 +59,6 @@ public class MinBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. - */ - public MinBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return MinBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { minBucketKeys = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java index 2e3c44ac3902d..aa78439784992 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java @@ -33,8 +33,6 @@ package org.opensearch.search.aggregations.pipeline; import org.opensearch.common.collect.EvictingQueue; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -45,7 +43,6 @@ import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.ListIterator; @@ -88,34 +85,6 @@ public class MovAvgPipelineAggregator extends PipelineAggregator { this.minimize = minimize; } - /** - * Read from a stream. 
- */ - public MovAvgPipelineAggregator(StreamInput in) throws IOException { - super(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = GapPolicy.readFrom(in); - window = in.readVInt(); - predict = in.readVInt(); - model = in.readNamedWriteable(MovAvgModel.class); - minimize = in.readBoolean(); - } - - @Override - public void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeVInt(window); - out.writeVInt(predict); - out.writeNamedWriteable(model); - out.writeBoolean(minimize); - } - - @Override - public String getWriteableName() { - return MovAvgPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 7b20a796b8134..a4c3c14f3365f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,8 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; @@ -42,7 +40,6 @@ import org.opensearch.search.aggregations.bucket.MultiBucketsAggregation; import org.opensearch.search.aggregations.bucket.histogram.HistogramFactory; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -98,31 +95,6 @@ public class MovFnPipelineAggregator extends PipelineAggregator { this.shift = shift; } - public MovFnPipelineAggregator(StreamInput in) throws IOException { - super(in); - script = new Script(in); - formatter = in.readNamedWriteable(DocValueFormat.class); - gapPolicy = BucketHelpers.GapPolicy.readFrom(in); - bucketsPath = in.readString(); - window = in.readInt(); - shift = in.readInt(); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - script.writeTo(out); - out.writeNamedWriteable(formatter); - gapPolicy.writeTo(out); - out.writeString(bucketsPath); - out.writeInt(window); - out.writeInt(shift); - } - - @Override - public String getWriteableName() { - return MovFnPipelineAggregationBuilder.NAME; - } - @Override public InternalAggregation reduce(InternalAggregation aggregation, InternalAggregation.ReduceContext reduceContext) { InternalMultiBucketAggregation< diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index 7fad7e233c424..41699ffb8b6b8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ -32,13 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; import 
org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -69,26 +66,6 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg this.keyed = keyed; } - /** - * Read from a stream. - */ - public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - percents = in.readDoubleArray(); - keyed = in.readBoolean(); - } - - @Override - public void innerWriteTo(StreamOutput out) throws IOException { - out.writeDoubleArray(percents); - out.writeBoolean(keyed); - } - - @Override - public String getWriteableName() { - return PercentilesBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { data = new ArrayList<>(1024); diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java index 4b1134d9e1a60..204dcdb7a400f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PipelineAggregator.java @@ -32,11 +32,7 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; -import org.opensearch.common.io.stream.NamedWriteable; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregation.ReduceContext; @@ -54,7 +50,7 @@ * * @opensearch.internal */ -public abstract class PipelineAggregator implements NamedWriteable { +public abstract class PipelineAggregator { /** * Parse the {@link PipelineAggregationBuilder} from a {@link XContentParser}. 
* @@ -139,55 +135,6 @@ protected PipelineAggregator(String name, String[] bucketsPaths, Map { diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java index 577f66f94e90f..a01be279853f5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/StatsBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -61,15 +59,6 @@ public class StatsBucketPipelineAggregator extends BucketMetricsPipelineAggregat super(name, bucketsPaths, gapPolicy, formatter, metadata); } - public StatsBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return StatsBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java index 32a64fafa7a2d..01939b1b1063c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/SumBucketPipelineAggregator.java @@ -32,12 +32,10 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import java.io.IOException; import java.util.Map; /** @@ -58,18 +56,6 @@ public class SumBucketPipelineAggregator extends BucketMetricsPipelineAggregator super(name, bucketsPaths, gapPolicy, formatter, metadata); } - /** - * Read from a stream. 
- */ - public SumBucketPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return SumBucketPipelineAggregationBuilder.NAME; - } - @Override protected void preCollection() { sum = 0; diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 31fdc5c9d9e9d..a0c2970625472 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.DelayableWriteable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -95,11 +94,7 @@ public QuerySearchResult() { public QuerySearchResult(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - isNull = in.readBoolean(); - } else { - isNull = false; - } + isNull = in.readBoolean(); if (isNull == false) { ShardSearchContextId id = new ShardSearchContextId(in); readFromWithId(id, in); @@ -355,14 +350,8 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc } } setTopDocs(readTopDocs(in)); - if (in.getVersion().before(LegacyESVersion.V_7_7_0)) { - if (hasAggs = in.readBoolean()) { - aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in)); - } - } else { - if (hasAggs = in.readBoolean()) { - aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); - } + if (hasAggs = in.readBoolean()) { + aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); } if (in.readBoolean()) { suggest = new Suggest(in); @@ -373,17 +362,13 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc hasProfileResults = profileShardResults != null; serviceTimeEWMA = in.readZLong(); nodeQueueSize = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); - setRescoreDocIds(new RescoreDocIds(in)); - } + setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new)); + setRescoreDocIds(new RescoreDocIds(in)); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - out.writeBoolean(isNull); - } + out.writeBoolean(isNull); if (isNull == false) { contextId.writeTo(out); writeToNoId(out); @@ -406,12 +391,7 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { - InternalAggregations aggs = aggregations.expand(); - aggs.writeTo(out); - } else { - aggregations.writeTo(out); - } + aggregations.writeTo(out); } if (suggest == null) { out.writeBoolean(false); @@ -424,10 +404,8 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeOptionalWriteable(profileShardResults); out.writeZLong(serviceTimeEWMA); out.writeInt(nodeQueueSize); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeOptionalWriteable(getShardSearchRequest()); - getRescoreDocIds().writeTo(out); - } + out.writeOptionalWriteable(getShardSearchRequest()); + getRescoreDocIds().writeTo(out); } public TotalHits 
getTotalHits() { diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 05d4153949f9a..48a935ea984fa 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -56,7 +56,6 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.ChiSquare; import org.opensearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregator; import org.opensearch.search.aggregations.pipeline.InternalDerivative; import org.opensearch.search.aggregations.pipeline.MovAvgModel; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; @@ -193,7 +192,6 @@ public List getPipelineAggregations() { new PipelineAggregationSpec( DerivativePipelineAggregationBuilder.NAME, DerivativePipelineAggregationBuilder::new, - DerivativePipelineAggregator::new, DerivativePipelineAggregationBuilder::parse ).addResultReader(InternalDerivative::new) ); @@ -375,12 +373,7 @@ public void testRegisterPipelineAggregation() { @Override public List getPipelineAggregations() { return singletonList( - new PipelineAggregationSpec( - "test", - TestPipelineAggregationBuilder::new, - TestPipelineAggregator::new, - TestPipelineAggregationBuilder::fromXContent - ) + new PipelineAggregationSpec("test", TestPipelineAggregationBuilder::new, TestPipelineAggregationBuilder::fromXContent) ); } })); @@ -570,21 +563,10 @@ protected void validate(ValidationContext context) {} * Dummy test {@link PipelineAggregator} used to test registering aggregation builders. */ private static class TestPipelineAggregator extends PipelineAggregator { - /** - * Read from a stream. 
- */ - TestPipelineAggregator(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return "test"; + TestPipelineAggregator() { + super("test", new String[] {}, null); } - @Override - protected void doWriteTo(StreamOutput out) throws IOException {} - @Override public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { return null; diff --git a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java index bb28c0657ac8e..2d022be10fe29 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/InternalAggregationsTests.java @@ -43,12 +43,9 @@ import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogramTests; import org.opensearch.search.aggregations.bucket.terms.StringTerms; import org.opensearch.search.aggregations.bucket.terms.StringTermsTests; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.InternalSimpleValueTests; import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.PipelineAggregator; -import org.opensearch.search.aggregations.pipeline.SiblingPipelineAggregator; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; @@ -91,7 +88,6 @@ public void testNonFinalReduceTopLevelPipelineAggs() { ); List aggs = singletonList(InternalAggregations.from(Collections.singletonList(terms))); InternalAggregations reducedAggs = InternalAggregations.topLevelReduce(aggs, maxBucketReduceContext().forPartialReduction()); - assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(1, reducedAggs.aggregations.size()); } @@ -116,7 +112,6 @@ public void testFinalReduceTopLevelPipelineAggs() { Collections.singletonList(aggs), maxBucketReduceContext().forFinalReduction() ); - assertEquals(0, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(2, reducedAggs.aggregations.size()); } @@ -130,10 +125,6 @@ private InternalAggregation.ReduceContextBuilder maxBucketReduceContext() { } public static InternalAggregations createTestInstance() throws Exception { - return createTestInstance(randomPipelineTree()); - } - - public static InternalAggregations createTestInstance(PipelineAggregator.PipelineTree pipelineTree) throws Exception { List aggsList = new ArrayList<>(); if (randomBoolean()) { StringTermsTests stringTermsTests = new StringTermsTests(); @@ -150,23 +141,7 @@ public static InternalAggregations createTestInstance(PipelineAggregator.Pipelin InternalSimpleValueTests simpleValueTests = new InternalSimpleValueTests(); aggsList.add(simpleValueTests.createTestInstance()); } - return new InternalAggregations(aggsList, () -> pipelineTree); - } - - private static PipelineAggregator.PipelineTree randomPipelineTree() { - List topLevelPipelineAggs = new ArrayList<>(); - if (randomBoolean()) { - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create()); - } - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new 
AvgBucketPipelineAggregationBuilder("name2", "bucket2").create()); - } - if (randomBoolean()) { - topLevelPipelineAggs.add((SiblingPipelineAggregator) new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); - } - } - return new PipelineAggregator.PipelineTree(emptyMap(), topLevelPipelineAggs); + return new InternalAggregations(aggsList); } public void testSerialization() throws Exception { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java index 9c7196db68e97..c39445fef88c5 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/terms/SignificanceHeuristicTests.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.terms; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.InputStreamStreamInput; @@ -57,7 +56,6 @@ import org.opensearch.search.aggregations.bucket.terms.heuristic.MutualInformation; import org.opensearch.search.aggregations.bucket.terms.heuristic.PercentageScore; import org.opensearch.search.aggregations.bucket.terms.heuristic.SignificanceHeuristic; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.InternalAggregationTestCase; @@ -95,9 +93,6 @@ public void testStreamResponse() throws Exception { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); out.setVersion(version); - if (version.before(LegacyESVersion.V_7_8_0)) { - sigTerms.mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree.EMPTY); - } out.writeNamedWriteable(sigTerms); // read diff --git a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java index 5325c48e16913..2cba15f2e2039 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalAggregationTestCase.java @@ -144,12 +144,10 @@ import org.opensearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.DerivativePipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.InternalBucketMetricValue; import org.opensearch.search.aggregations.pipeline.InternalSimpleValue; -import org.opensearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.opensearch.search.aggregations.pipeline.ParsedBucketMetricValue; import org.opensearch.search.aggregations.pipeline.ParsedDerivative; import org.opensearch.search.aggregations.pipeline.ParsedExtendedStatsBucket; @@ -160,7 +158,6 @@ import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import 
org.opensearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.opensearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; -import org.opensearch.search.aggregations.pipeline.SumBucketPipelineAggregationBuilder; import org.opensearch.test.hamcrest.OpenSearchAssertions; import java.io.IOException; @@ -175,7 +172,6 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; import static org.opensearch.common.xcontent.XContentHelper.toXContent; import static org.opensearch.search.aggregations.InternalMultiBucketAggregation.countInnerBucket; @@ -482,60 +478,6 @@ public final void testFromXContentWithRandomFields() throws IOException { assertFromXContent(aggregation, parsedAggregation); } - public void testMergePipelineTreeForBWCSerialization() { - T agg = createTestInstance(); - PipelineAggregator.PipelineTree pipelineTree = randomPipelineTree(agg); - agg.mergePipelineTreeForBWCSerialization(pipelineTree); - assertMergedPipelineTreeForBWCSerialization(agg, pipelineTree); - } - - public void testMergePipelineTreeTwice() { - T agg = createTestInstance(); - PipelineAggregator.PipelineTree pipelineTree = randomPipelineTree(agg); - agg.mergePipelineTreeForBWCSerialization(pipelineTree); - agg.mergePipelineTreeForBWCSerialization(randomPipelineTree(agg)); // This should be ignored - assertMergedPipelineTreeForBWCSerialization(agg, pipelineTree); - } - - public static PipelineAggregator.PipelineTree randomPipelineTree(InternalAggregation aggregation) { - Map subTree = new HashMap<>(); - aggregation.forEachBucket(bucketAggs -> { - for (Aggregation subAgg : bucketAggs) { - if (subTree.containsKey(subAgg.getName())) { - continue; - } - subTree.put(subAgg.getName(), randomPipelineTree((InternalAggregation) subAgg)); - } - }); - return new PipelineAggregator.PipelineTree(emptyMap(), randomPipelineAggregators()); - } - - public static List randomPipelineAggregators() { - List pipelines = new ArrayList<>(); - if (randomBoolean()) { - if (randomBoolean()) { - pipelines.add(new MaxBucketPipelineAggregationBuilder("name1", "bucket1").create()); - } - if (randomBoolean()) { - pipelines.add(new AvgBucketPipelineAggregationBuilder("name2", "bucket2").create()); - } - if (randomBoolean()) { - pipelines.add(new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); - } - } - return pipelines; - } - - @SuppressWarnings("deprecation") - private void assertMergedPipelineTreeForBWCSerialization(InternalAggregation agg, PipelineAggregator.PipelineTree pipelineTree) { - assertThat(agg.pipelineAggregatorsForBwcSerialization(), equalTo(pipelineTree.aggregators())); - agg.forEachBucket(bucketAggs -> { - for (Aggregation subAgg : bucketAggs) { - assertMergedPipelineTreeForBWCSerialization((InternalAggregation) subAgg, pipelineTree.subTree(subAgg.getName())); - } - }); - } - protected abstract void assertFromXContent(T aggregation, ParsedAggregation parsedAggregation) throws IOException; @SuppressWarnings("unchecked")
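
Taken together, the serialization changes above reduce the aggregation wire format to a single named-writeable list with no version branches. A minimal round-trip sketch (illustrative only, not part of this patch; it assumes a NamedWriteableRegistry already populated with the aggregation entries that SearchModule registers):

    import java.io.IOException;

    import org.opensearch.common.io.stream.BytesStreamOutput;
    import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput;
    import org.opensearch.common.io.stream.NamedWriteableRegistry;
    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.search.aggregations.InternalAggregations;

    // Illustrative helper, not part of this patch.
    final class AggregationsWireDemo {
        static InternalAggregations roundTrip(InternalAggregations aggs, NamedWriteableRegistry registry) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                aggs.writeTo(out); // now a plain writeNamedWriteableList(...), no BWC branch
                try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                    return InternalAggregations.readFrom(in); // no pre-7.8.0 pipeline list to skip
                }
            }
        }
    }
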