Commit 550a7ec: Address review comments

Signed-off-by: Sorabh Hamirwasia <sohami.apache@gmail.com>
sohami committed Aug 7, 2023
1 parent 85cdd8f
Showing 6 changed files with 127 additions and 115 deletions.
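In brief, the review changes: the negated check in DefaultSearchContext is rewritten as == false, the slice-count setting key gains a _count suffix, ContextIndexSearcher#slices is narrowed from public to protected, the slice-group list is presized to the target slice count, and the tests and IndexReaderUtils are refactored onto try-with-resources. Codecov flagged the added line at DefaultSearchContext.java#L950 as not covered by tests.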
server/src/main/java/org/opensearch/search/DefaultSearchContext.java
@@ -946,7 +946,7 @@ private boolean useConcurrentSearch(Executor concurrentSearchExecutor) {

     @Override
     public int getTargetMaxSliceCount() {
-        if (!isConcurrentSegmentSearchEnabled()) {
+        if (isConcurrentSegmentSearchEnabled() == false) {
             throw new IllegalStateException("Target slice count should not be used when concurrent search is disabled");
         }
         return clusterService.getClusterSettings().get(SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING);
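Writing the negation as == false rather than ! follows the codebase's existing style convention for conditionals, where the explicit comparison is considered harder to misread; the behavior is unchanged.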
@@ -256,7 +256,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv

     // settings to configure maximum slice created per search request using OS custom slice computation mechanism. Default lucene
     // mechanism will not be used if this setting is set with value > 0
-    public static final String CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY = "search.concurrent.max_slice";
+    public static final String CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY = "search.concurrent.max_slice_count";
     public static final int CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE = 0;
 
     // value == 0 means lucene slice computation will be used
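For context, the renamed key feeds the dynamic cluster setting that DefaultSearchContext reads above. The actual CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING definition is outside this hunk; what follows is a plausible sketch, not the committed code, of how such a setting would be declared from these constants, assuming org.opensearch.common.settings.Setting:

    // Hypothetical declaration, not part of this diff.
    public static final Setting<Integer> CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_SETTING = Setting.intSetting(
        CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_KEY,           // "search.concurrent.max_slice_count"
        CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, // default 0 == use lucene's slicing
        CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE, // minimum allowed value
        Setting.Property.Dynamic,                                       // updatable via the cluster settings API
        Setting.Property.NodeScope
    );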
@@ -453,7 +453,7 @@ public CollectionStatistics collectionStatistics(String field) throws IOExceptio
      * @return leafSlice group to be executed by different threads
      */
     @Override
-    public LeafSlice[] slices(List<LeafReaderContext> leaves) {
+    protected LeafSlice[] slices(List<LeafReaderContext> leaves) {
         return slicesInternal(leaves, searchContext.getTargetMaxSliceCount());
     }
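Narrowing slices from public to protected presumably matches the visibility of the IndexSearcher#slices method it overrides in Lucene, so the override no longer widens the API surface; same-package callers, like the tests below, can still invoke it.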
@@ -40,7 +40,7 @@ static IndexSearcher.LeafSlice[] getSlices(List<LeafReaderContext> leaves, int t
         // Sort by maxDoc, descending:
         sortedLeaves.sort(Collections.reverseOrder(Comparator.comparingInt(l -> l.reader().maxDoc())));
 
-        final List<List<LeafReaderContext>> groupedLeaves = new ArrayList<>();
+        final List<List<LeafReaderContext>> groupedLeaves = new ArrayList<>(targetSliceCount);
         for (int i = 0; i < targetSliceCount; ++i) {
             groupedLeaves.add(new ArrayList<>());
         }
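The change presizes groupedLeaves to avoid ArrayList growth; the loop that assigns leaves to groups sits outside this hunk. Below is a self-contained sketch of the overall strategy, assuming, consistent with the 3/3/2/2 split the tests in this commit assert, that the sorted leaves are dealt round-robin across the target groups; integer sizes stand in for leaf.reader().maxDoc() and all names are illustrative:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    public final class RoundRobinSlicesSketch {
        static List<List<Integer>> group(List<Integer> segmentSizes, int targetSliceCount) {
            final List<Integer> sorted = new ArrayList<>(segmentSizes);
            // Sort by size, descending, so the largest segments are spread out first.
            sorted.sort(Collections.reverseOrder(Comparator.naturalOrder()));
            final List<List<Integer>> grouped = new ArrayList<>(targetSliceCount); // presized, as in the change
            for (int i = 0; i < targetSliceCount; ++i) {
                grouped.add(new ArrayList<>());
            }
            // Deal segments round-robin so each slice gets a comparable share.
            for (int i = 0; i < sorted.size(); ++i) {
                grouped.get(i % targetSliceCount).add(sorted.get(i));
            }
            return grouped;
        }

        public static void main(String[] args) {
            // 10 equal-sized segments into 4 slices -> groups of 3, 3, 2, 2,
            // the split asserted by the tests in this commit.
            System.out.println(group(Collections.nCopies(10, 1), 4));
        }
    }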
@@ -309,111 +309,119 @@ public void onRemoval(ShardId shardId, Accountable accountable) {

     public void testSlicesInternal() throws Exception {
         final List<LeafReaderContext> leaves = getLeaves(10);
-
-        final Directory directory = newDirectory();
-        IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));
-        Document document = new Document();
-        document.add(new StringField("field1", "value1", Field.Store.NO));
-        document.add(new StringField("field2", "value1", Field.Store.NO));
-        iw.addDocument(document);
-        iw.commit();
-        DirectoryReader directoryReader = DirectoryReader.open(directory);
-
-        SearchContext searchContext = mock(SearchContext.class);
-        IndexShard indexShard = mock(IndexShard.class);
-        when(searchContext.indexShard()).thenReturn(indexShard);
-        when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR);
-        ContextIndexSearcher searcher = new ContextIndexSearcher(
-            directoryReader,
-            IndexSearcher.getDefaultSimilarity(),
-            IndexSearcher.getDefaultQueryCache(),
-            IndexSearcher.getDefaultQueryCachingPolicy(),
-            true,
-            null,
-            searchContext
-        );
-        // Case 1: Verify the slice count when lucene default slice computation is used
-        IndexSearcher.LeafSlice[] slices = searcher.slicesInternal(
-            leaves,
-            SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE
-        );
-        int expectedSliceCount = 2;
-        // 2 slices will be created since max segment per slice of 5 will be reached
-        assertEquals(expectedSliceCount, slices.length);
-        for (int i = 0; i < expectedSliceCount; ++i) {
-            assertEquals(5, slices[i].leaves.length);
-        }
-
-        // Case 2: Verify the slice count when custom max slice computation is used
-        expectedSliceCount = 4;
-        slices = searcher.slicesInternal(leaves, expectedSliceCount);
-
-        // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices
-        assertEquals(expectedSliceCount, slices.length);
-        for (int i = 0; i < expectedSliceCount; ++i) {
-            if (i < 2) {
-                assertEquals(3, slices[i].leaves.length);
-            } else {
-                assertEquals(2, slices[i].leaves.length);
-            }
-        }
-        IOUtils.close(directoryReader, iw, directory);
+        try (
+            final Directory directory = newDirectory();
+            IndexWriter iw = new IndexWriter(
+                directory,
+                new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)
+            )
+        ) {
+            Document document = new Document();
+            document.add(new StringField("field1", "value1", Field.Store.NO));
+            document.add(new StringField("field2", "value1", Field.Store.NO));
+            iw.addDocument(document);
+            iw.commit();
+            try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
+                SearchContext searchContext = mock(SearchContext.class);
+                IndexShard indexShard = mock(IndexShard.class);
+                when(searchContext.indexShard()).thenReturn(indexShard);
+                when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR);
+                ContextIndexSearcher searcher = new ContextIndexSearcher(
+                    directoryReader,
+                    IndexSearcher.getDefaultSimilarity(),
+                    IndexSearcher.getDefaultQueryCache(),
+                    IndexSearcher.getDefaultQueryCachingPolicy(),
+                    true,
+                    null,
+                    searchContext
+                );
+                // Case 1: Verify the slice count when lucene default slice computation is used
+                IndexSearcher.LeafSlice[] slices = searcher.slicesInternal(
+                    leaves,
+                    SearchService.CONCURRENT_SEGMENT_SEARCH_TARGET_MAX_SLICE_COUNT_DEFAULT_VALUE
+                );
+                int expectedSliceCount = 2;
+                // 2 slices will be created since max segment per slice of 5 will be reached
+                assertEquals(expectedSliceCount, slices.length);
+                for (int i = 0; i < expectedSliceCount; ++i) {
+                    assertEquals(5, slices[i].leaves.length);
+                }
+
+                // Case 2: Verify the slice count when custom max slice computation is used
+                expectedSliceCount = 4;
+                slices = searcher.slicesInternal(leaves, expectedSliceCount);
+
+                // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices
+                assertEquals(expectedSliceCount, slices.length);
+                for (int i = 0; i < expectedSliceCount; ++i) {
+                    if (i < 2) {
+                        assertEquals(3, slices[i].leaves.length);
+                    } else {
+                        assertEquals(2, slices[i].leaves.length);
+                    }
+                }
+            }
+        }
     }
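The Case 1 arithmetic, as the assertion comments note: getLeaves(10) yields ten single-document segments, and Lucene's default slice computation here groups at most five segments per slice, so the ten leaves fold into exactly two slices of five each.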

     public void testGetSlicesWithNonNullExecutorButCSDisabled() throws Exception {
         final List<LeafReaderContext> leaves = getLeaves(10);
-
-        final Directory directory = newDirectory();
-        IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));
-        Document document = new Document();
-        document.add(new StringField("field1", "value1", Field.Store.NO));
-        document.add(new StringField("field2", "value1", Field.Store.NO));
-        iw.addDocument(document);
-        iw.commit();
-        DirectoryReader directoryReader = DirectoryReader.open(directory);
-
-        SearchContext searchContext = mock(SearchContext.class);
-        IndexShard indexShard = mock(IndexShard.class);
-        when(searchContext.indexShard()).thenReturn(indexShard);
-        when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR);
-        when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(false);
-        ContextIndexSearcher searcher = new ContextIndexSearcher(
-            directoryReader,
-            IndexSearcher.getDefaultSimilarity(),
-            IndexSearcher.getDefaultQueryCache(),
-            IndexSearcher.getDefaultQueryCachingPolicy(),
-            true,
-            null,
-            searchContext
-        );
-        // Case 1: Verify getSlices return null when concurrent segment search is disabled
-        assertNull(searcher.getSlices());
-
-        // Case 2: Verify the slice count when custom max slice computation is used
-        searcher = new ContextIndexSearcher(
-            directoryReader,
-            IndexSearcher.getDefaultSimilarity(),
-            IndexSearcher.getDefaultQueryCache(),
-            IndexSearcher.getDefaultQueryCachingPolicy(),
-            true,
-            mock(ExecutorService.class),
-            searchContext
-        );
-        when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(true);
-        when(searchContext.getTargetMaxSliceCount()).thenReturn(4);
-        int expectedSliceCount = 4;
-        IndexSearcher.LeafSlice[] slices = searcher.slices(leaves);
-
-        // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices
-        assertEquals(expectedSliceCount, slices.length);
-        for (int i = 0; i < expectedSliceCount; ++i) {
-            if (i < 2) {
-                assertEquals(3, slices[i].leaves.length);
-            } else {
-                assertEquals(2, slices[i].leaves.length);
-            }
-        }
-        IOUtils.close(directoryReader, iw, directory);
+        try (
+            final Directory directory = newDirectory();
+            IndexWriter iw = new IndexWriter(
+                directory,
+                new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)
+            )
+        ) {
+            Document document = new Document();
+            document.add(new StringField("field1", "value1", Field.Store.NO));
+            document.add(new StringField("field2", "value1", Field.Store.NO));
+            iw.addDocument(document);
+            iw.commit();
+            try (DirectoryReader directoryReader = DirectoryReader.open(directory);) {
+                SearchContext searchContext = mock(SearchContext.class);
+                IndexShard indexShard = mock(IndexShard.class);
+                when(searchContext.indexShard()).thenReturn(indexShard);
+                when(searchContext.bucketCollectorProcessor()).thenReturn(SearchContext.NO_OP_BUCKET_COLLECTOR_PROCESSOR);
+                when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(false);
+                ContextIndexSearcher searcher = new ContextIndexSearcher(
+                    directoryReader,
+                    IndexSearcher.getDefaultSimilarity(),
+                    IndexSearcher.getDefaultQueryCache(),
+                    IndexSearcher.getDefaultQueryCachingPolicy(),
+                    true,
+                    null,
+                    searchContext
+                );
+                // Case 1: Verify getSlices return null when concurrent segment search is disabled
+                assertNull(searcher.getSlices());
+
+                // Case 2: Verify the slice count when custom max slice computation is used
+                searcher = new ContextIndexSearcher(
+                    directoryReader,
+                    IndexSearcher.getDefaultSimilarity(),
+                    IndexSearcher.getDefaultQueryCache(),
+                    IndexSearcher.getDefaultQueryCachingPolicy(),
+                    true,
+                    mock(ExecutorService.class),
+                    searchContext
+                );
+                when(searchContext.isConcurrentSegmentSearchEnabled()).thenReturn(true);
+                when(searchContext.getTargetMaxSliceCount()).thenReturn(4);
+                int expectedSliceCount = 4;
+                IndexSearcher.LeafSlice[] slices = searcher.slices(leaves);
+
+                // 4 slices will be created with 3 leaves in first 2 slices and 2 leaves in other slices
+                assertEquals(expectedSliceCount, slices.length);
+                for (int i = 0; i < expectedSliceCount; ++i) {
+                    if (i < 2) {
+                        assertEquals(3, slices[i].leaves.length);
+                    } else {
+                        assertEquals(2, slices[i].leaves.length);
+                    }
+                }
+            }
+        }
     }
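The refactor above replaces the single IOUtils.close(directoryReader, iw, directory) call with nested try-with-resources blocks. A minimal, self-contained illustration, not from the commit, of why the two are equivalent for cleanup ordering: resources declared in one try statement are closed in reverse declaration order, so the writer is closed before the directory, just as the manual call closed it first.

    public final class CloseOrderDemo {
        static final class Resource implements AutoCloseable {
            private final String name;

            Resource(String name) {
                this.name = name;
            }

            @Override
            public void close() {
                System.out.println("closing " + name);
            }
        }

        public static void main(String[] args) {
            try (Resource directory = new Resource("directory"); Resource writer = new Resource("writer")) {
                System.out.println("using " + writer.name + " and " + directory.name);
            }
            // Prints "using writer and directory", then "closing writer",
            // then "closing directory": reverse declaration order.
        }
    }

Unlike the manual call, the try form also guarantees cleanup when an assertion fails partway through the test.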

     private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException {
@@ -31,21 +31,25 @@ public class IndexReaderUtils {
      * @return created leaves
      */
     public static List<LeafReaderContext> getLeaves(int leafCount) throws Exception {
-        final Directory directory = newDirectory();
-        IndexWriter iw = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));
-        for (int i = 0; i < leafCount; ++i) {
-            Document document = new Document();
-            final String fieldValue = "value" + i;
-            document.add(new StringField("field1", fieldValue, Field.Store.NO));
-            document.add(new StringField("field2", fieldValue, Field.Store.NO));
-            iw.addDocument(document);
-            iw.commit();
-        }
-        iw.close();
-        DirectoryReader directoryReader = DirectoryReader.open(directory);
-        List<LeafReaderContext> leaves = directoryReader.leaves();
-        directoryReader.close();
-        directory.close();
-        return leaves;
+        try (
+            final Directory directory = newDirectory();
+            final IndexWriter iw = new IndexWriter(
+                directory,
+                new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)
+            )
+        ) {
+            for (int i = 0; i < leafCount; ++i) {
+                Document document = new Document();
+                final String fieldValue = "value" + i;
+                document.add(new StringField("field1", fieldValue, Field.Store.NO));
+                document.add(new StringField("field2", fieldValue, Field.Store.NO));
+                iw.addDocument(document);
+                iw.commit();
+            }
+            try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
+                List<LeafReaderContext> leaves = directoryReader.leaves();
+                return leaves;
+            }
+        }
     }
 }
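Because the writer commits after every document and merging is disabled via NoMergePolicy, each call produces exactly leafCount single-document segments; that is what lets the tests above request ten leaves and reason precisely about per-slice counts.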
