Add support for index level slice count setting #15336

Merged: 1 commit, Aug 22, 2024
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -23,6 +23,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Support filtering on a large list encoded by bitmap ([#14774](https://github.com/opensearch-project/OpenSearch/pull/14774))
- Add slice execution listeners to SearchOperationListener interface ([#15153](https://github.com/opensearch-project/OpenSearch/pull/15153))
- Adding access to noSubMatches and noOverlappingMatches in Hyphenation ([#13895](https://github.com/opensearch-project/OpenSearch/pull/13895))
- Add support for index level max slice count setting for concurrent segment search ([#15336](https://github.com/opensearch-project/OpenSearch/pull/15336))

### Dependencies
- Bump `netty` from 4.1.111.Final to 4.1.112.Final ([#15081](https://github.com/opensearch-project/OpenSearch/pull/15081))
IndexScopedSettings.java
@@ -238,6 +238,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {

        // Settings for concurrent segment search
        IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING,
        IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT,
        IndexSettings.ALLOW_DERIVED_FIELDS,

        // Settings for star tree index
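Adding the new setting to this registration list is what lets OpenSearch accept index.search.concurrent.max_slice_count on create-index and update-settings requests; unregistered index settings are rejected as unknown. Below is a minimal sketch, not part of the PR, that exercises the registration directly. It assumes the list shown above is IndexScopedSettings.BUILT_IN_INDEX_SETTINGS and relies on the validate method inherited from AbstractScopedSettings; the class name is hypothetical.

import org.opensearch.common.settings.IndexScopedSettings;
import org.opensearch.common.settings.Settings;

public final class SliceCountRegistrationSketch {
    public static void main(String[] args) {
        // Build an IndexScopedSettings instance over the built-in index settings,
        // which now include INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.
        IndexScopedSettings indexScopedSettings = new IndexScopedSettings(
            Settings.EMPTY,
            IndexScopedSettings.BUILT_IN_INDEX_SETTINGS
        );

        // Validation passes because the key is registered; an unknown "index.*" key
        // would be rejected here as an unknown setting.
        indexScopedSettings.validate(
            Settings.builder().put("index.search.concurrent.max_slice_count", 2).build(),
            true
        );
    }
}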
IndexSettings.java
@@ -76,6 +76,7 @@
import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING;
import static org.opensearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING;
import static org.opensearch.index.store.remote.directory.RemoteSnapshotDirectory.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY_MINIMUM_VERSION;
import static org.opensearch.search.SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE;

/**
* This class encapsulates all index level settings and handles settings updates.
@@ -691,6 +692,14 @@ public static IndexMergePolicy fromString(String text) {
        Property.Dynamic
    );

    public static final Setting<Integer> INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT = Setting.intSetting(
        "index.search.concurrent.max_slice_count",
        CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE,
        CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE,
        Property.Dynamic,
        Property.IndexScope
    );

    public static final Setting<Boolean> INDEX_DOC_ID_FUZZY_SET_ENABLED_SETTING = Setting.boolSetting(
        "index.optimize_doc_id_lookup.fuzzy_set.enabled",
        false,
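Because the setting is declared with Property.Dynamic and Property.IndexScope, it can be changed per index at runtime, with both the default and the minimum taken from SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE. The sketch below, not part of the PR, shows how a caller might toggle the override through the Java admin client, mirroring the calls used in the added test; the helper class name and the org.opensearch.client.Client import path (as used on the 2.x line this PR targets) are assumptions.

import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexSettings;

public final class IndexSliceCountExample {

    // Dynamically sets index.search.concurrent.max_slice_count on a single index.
    static void setIndexMaxSliceCount(Client client, String index, int maxSliceCount) {
        client.admin()
            .indices()
            .prepareUpdateSettings(index)
            .setSettings(Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), maxSliceCount))
            .get();
    }

    // Clears the per-index override so searches fall back to the cluster-level target slice count.
    static void clearIndexMaxSliceCount(Client client, String index) {
        client.admin()
            .indices()
            .prepareUpdateSettings(index)
            .setSettings(Settings.builder().putNull(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey()))
            .get();
    }
}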
DefaultSearchContext.java
@@ -991,7 +991,14 @@ public int getTargetMaxSliceCount() {
        if (shouldUseConcurrentSearch() == false) {
            throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled");
        }
        return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING);

        return indexService.getIndexSettings()
            .getSettings()
            .getAsInt(
                IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(),
                clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING)
            );

    }

    @Override
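In the hunk above, the single return on the cluster-level CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING is the line the PR removes, and the indexService-based lookup that follows is its replacement: Settings#getAsInt returns the supplied fallback when the key is absent, so an index without the new setting keeps the existing cluster-level behavior. A minimal sketch of that resolution rule, with hypothetical class and variable names:

import org.opensearch.common.settings.Settings;

public final class SliceCountResolutionSketch {

    // Index-level value wins when present; otherwise the cluster-level value is used.
    static int resolveMaxSliceCount(Settings indexSettings, int clusterLevelSliceCount) {
        return indexSettings.getAsInt("index.search.concurrent.max_slice_count", clusterLevelSliceCount);
    }

    public static void main(String[] args) {
        Settings withOverride = Settings.builder().put("index.search.concurrent.max_slice_count", 3).build();
        Settings withoutOverride = Settings.EMPTY;

        System.out.println(resolveMaxSliceCount(withOverride, 4));    // 3: index override wins
        System.out.println(resolveMaxSliceCount(withoutOverride, 4)); // 4: falls back to cluster value
    }
}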
101 changes: 101 additions & 0 deletions server/src/test/java/org/opensearch/search/SearchServiceTests.java
@@ -54,6 +54,7 @@
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.action.support.WriteRequest;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.UUIDs;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.settings.SettingsException;
@@ -1413,6 +1414,106 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException {
            .get();
    }

    /**
     * Tests that the target slice count is resolved correctly when concurrent search is enabled:
     * the index level slice count setting is used when it is set,
     * otherwise the cluster level slice count setting (or its default) is used.
     * @throws IOException
     */
    public void testConcurrentSegmentSearchSliceCount() throws IOException {

        String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT);
        IndexService indexService = createIndex(index);
        final SearchService service = getInstanceFromNode(SearchService.class);
        ClusterService clusterService = getInstanceFromNode(ClusterService.class);
        ShardId shardId = new ShardId(indexService.index(), 0);
        long nowInMillis = System.currentTimeMillis();
        String clusterAlias = randomBoolean() ? null : randomAlphaOfLengthBetween(3, 10);
        SearchRequest searchRequest = new SearchRequest();
        searchRequest.allowPartialSearchResults(randomBoolean());
        ShardSearchRequest request = new ShardSearchRequest(
            OriginalIndices.NONE,
            searchRequest,
            shardId,
            indexService.numberOfShards(),
            AliasFilter.EMPTY,
            1f,
            nowInMillis,
            clusterAlias,
            Strings.EMPTY_ARRAY
        );
        // enable concurrent search
        client().admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true))
            .get();

        Integer[][] scenarios = {
            // cluster setting, index setting, expected slice count
            // expected value null will pick up default value from settings
            { null, null, clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING) },
            { 4, null, 4 },
            { null, 3, 3 },
            { 4, 3, 3 }, };

        for (Integer[] sliceCounts : scenarios) {
            Integer clusterSliceCount = sliceCounts[0];
            Integer indexSliceCount = sliceCounts[1];
            Integer targetSliceCount = sliceCounts[2];

            if (clusterSliceCount != null) {
                client().admin()
                    .cluster()
                    .prepareUpdateSettings()
                    .setTransientSettings(
                        Settings.builder()
                            .put(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey(), clusterSliceCount)
                    )
                    .get();
            } else {
                client().admin()
                    .cluster()
                    .prepareUpdateSettings()
                    .setTransientSettings(
                        Settings.builder().putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())
                    )
                    .get();
            }
            if (indexSliceCount != null) {
                client().admin()
                    .indices()
                    .prepareUpdateSettings(index)
                    .setSettings(
                        Settings.builder().put(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey(), indexSliceCount)
                    )
                    .get();
            } else {
                client().admin()
                    .indices()
                    .prepareUpdateSettings(index)
                    .setSettings(Settings.builder().putNull(IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_MAX_SLICE_COUNT.getKey()))
                    .get();
            }

            try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) {
                searchContext.evaluateRequestShouldUseConcurrentSearch();
                assertEquals(targetSliceCount.intValue(), searchContext.getTargetMaxSliceCount());
            }
        }
        // cleanup
        client().admin()
            .cluster()
            .prepareUpdateSettings()
            .setTransientSettings(
                Settings.builder()
                    .putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey())
                    .putNull(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING.getKey())
            )
            .get();
    }

    /**
     * Test that the Search Context for concurrent segment search enabled is set correctly at the time of construction.
     * The same value is used throughout the context object's lifetime even if the cluster setting changes before the request completes.