Skip to content

Commit

Permalink
Adds shard and segment info in the logs
Browse files Browse the repository at this point in the history
Signed-off-by: Tejas Shah <[email protected]>
  • Loading branch information
shatejas committed Jan 30, 2025
1 parent 38756c3 commit d855696
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 14 deletions.
12 changes: 10 additions & 2 deletions src/main/java/org/opensearch/knn/index/query/KNNQuery.java
Original file line number Diff line number Diff line change
Expand Up @@ -48,14 +48,17 @@ public class KNNQuery extends Query {
private final String indexName;
private final VectorDataType vectorDataType;
private final RescoreContext rescoreContext;

@Setter
private Query filterQuery;
@Getter
private BitSetProducer parentsFilter;
private Float radius;
private Context context;

// Note: ideally the query should not have to deal with shard-level information. Adding it for logging purposes only
// TODO: ThreadContext does not work with the logger; remove this from here once that is figured out
private int shardId;

public KNNQuery(
final String field,
final float[] queryVector,
Expand Down Expand Up @@ -179,7 +182,12 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo
final Weight filterWeight = getFilterWeight(searcher);
if (log.isDebugEnabled() && stopWatch != null) {
stopWatch.stop();
log.debug("Creating filter weight for field [{}] took [{}] nanos", field, stopWatch.totalTime().nanos());
log.debug(
"Creating filter weight, Shard: [{}], field: [{}] took in nanos: [{}]",
shardId,
field,
stopWatch.totalTime().nanos()
);
}

if (filterWeight != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,9 +52,11 @@ public static Query create(CreateQueryRequest createQueryRequest) {
final KNNEngine knnEngine = createQueryRequest.getKnnEngine();
final boolean expandNested = createQueryRequest.getExpandNested().orElse(false);
BitSetProducer parentFilter = null;
int shardId = -1;
if (createQueryRequest.getContext().isPresent()) {
QueryShardContext context = createQueryRequest.getContext().get();
parentFilter = context.getParentFilter();
shardId = context.getShardId();
}

if (parentFilter == null && expandNested) {
Expand Down Expand Up @@ -93,6 +95,7 @@ public static Query create(CreateQueryRequest createQueryRequest) {
.filterQuery(validatedFilterQuery)
.vectorDataType(vectorDataType)
.rescoreContext(rescoreContext)
.shardId(shardId)
.build();
break;
default:
Expand All @@ -106,6 +109,7 @@ public static Query create(CreateQueryRequest createQueryRequest) {
.filterQuery(validatedFilterQuery)
.vectorDataType(vectorDataType)
.rescoreContext(rescoreContext)
.shardId(shardId)
.build();
}

Expand Down
28 changes: 16 additions & 12 deletions src/main/java/org/opensearch/knn/index/query/KNNWeight.java
Original file line number Diff line number Diff line change
Expand Up @@ -131,9 +131,12 @@ public Scorer scorer(LeafReaderContext context) throws IOException {
* @return A Map of docId to scores for top k results
*/
public PerLeafResult searchLeaf(LeafReaderContext context, int k) throws IOException {
final SegmentReader reader = Lucene.segmentReader(context.reader());
final String segmentName = reader.getSegmentName();

StopWatch stopWatch = startStopWatch();
final BitSet filterBitSet = getFilteredDocsBitSet(context);
stopStopWatchAndLog(stopWatch, "FilterBitSet creation");
stopStopWatchAndLog(stopWatch, "FilterBitSet creation", segmentName);

final int maxDoc = context.reader().maxDoc();
int cardinality = filterBitSet.cardinality();
Expand All @@ -149,7 +152,7 @@ public PerLeafResult searchLeaf(LeafReaderContext context, int k) throws IOExcep
* This improves the recall.
*/
if (isFilteredExactSearchPreferred(cardinality)) {
Map<Integer, Float> result = doExactSearch(context, new BitSetIterator(filterBitSet, cardinality), cardinality, k);
Map<Integer, Float> result = doExactSearch(context, new BitSetIterator(filterBitSet, cardinality), cardinality, k, segmentName);
return new PerLeafResult(filterWeight == null ? null : filterBitSet, result);
}

Expand All @@ -160,25 +163,25 @@ public PerLeafResult searchLeaf(LeafReaderContext context, int k) throws IOExcep
final BitSet annFilter = (filterWeight != null && cardinality == maxDoc) ? null : filterBitSet;

StopWatch annStopWatch = startStopWatch();
final Map<Integer, Float> docIdsToScoreMap = doANNSearch(context, annFilter, cardinality, k);
stopStopWatchAndLog(annStopWatch, "ANN search");
final Map<Integer, Float> docIdsToScoreMap = doANNSearch(reader, context, annFilter, cardinality, k);
stopStopWatchAndLog(annStopWatch, "ANN search", segmentName);

// See whether we have to perform exact search based on approx search results
// This is required if there are no native engine files or if approximate search returned
// results less than K, though we have more than k filtered docs
if (isExactSearchRequire(context, cardinality, docIdsToScoreMap.size())) {
final BitSetIterator docs = filterWeight != null ? new BitSetIterator(filterBitSet, cardinality) : null;
Map<Integer, Float> result = doExactSearch(context, docs, cardinality, k);
Map<Integer, Float> result = doExactSearch(context, docs, cardinality, k, segmentName);
return new PerLeafResult(filterWeight == null ? null : filterBitSet, result);
}
return new PerLeafResult(filterWeight == null ? null : filterBitSet, docIdsToScoreMap);
}

private void stopStopWatchAndLog(@Nullable final StopWatch stopWatch, final String prefixMessage) {
private void stopStopWatchAndLog(@Nullable final StopWatch stopWatch, final String prefixMessage, String segmentName) {
if (log.isDebugEnabled() && stopWatch != null) {
stopWatch.stop();
final String logMessage = prefixMessage + ", field: [{}], time in nanos:[{}] ";
log.debug(logMessage, knnQuery.getField(), stopWatch.totalTime().nanos());
final String logMessage = prefixMessage + " shard: [{}], segment: [{}], field: [{}], time in nanos:[{}] ";
log.debug(logMessage, knnQuery.getShardId(), segmentName, knnQuery.getField(), stopWatch.totalTime().nanos());
}
}

Expand Down Expand Up @@ -238,7 +241,8 @@ private Map<Integer, Float> doExactSearch(
final LeafReaderContext context,
final DocIdSetIterator acceptedDocs,
final long numberOfAcceptedDocs,
int k
final int k,
final String segmentName
) throws IOException {
final ExactSearcherContextBuilder exactSearcherContextBuilder = ExactSearcher.ExactSearcherContext.builder()
.isParentHits(true)
Expand All @@ -253,13 +257,12 @@ private Map<Integer, Float> doExactSearch(
}

private Map<Integer, Float> doANNSearch(
final SegmentReader reader,
final LeafReaderContext context,
final BitSet filterIdsBitSet,
final int cardinality,
final int k
) throws IOException {
final SegmentReader reader = Lucene.segmentReader(context.reader());

FieldInfo fieldInfo = FieldInfoExtractor.getFieldInfo(reader, knnQuery.getField());

if (fieldInfo == null) {
Expand Down Expand Up @@ -420,7 +423,8 @@ public Map<Integer, Float> exactSearch(
) throws IOException {
StopWatch stopWatch = startStopWatch();
Map<Integer, Float> exactSearchResults = exactSearcher.searchLeaf(leafReaderContext, exactSearcherContext);
stopStopWatchAndLog(stopWatch, "Exact search");
final SegmentReader reader = Lucene.segmentReader(leafReaderContext.reader());
stopStopWatchAndLog(stopWatch, "Exact search", reader.getSegmentName());
return exactSearchResults;
}

Expand Down

0 comments on commit d855696

Please sign in to comment.