From cedbb9eb6aaf2289c6957cdc3b1b43d1daf90b8a Mon Sep 17 00:00:00 2001
From: Peter Zhu
Date: Wed, 22 Jan 2025 19:36:19 -0500
Subject: [PATCH 01/48] Switch alpha to alpha1 and update publish maven to
 support qualifier and meet requirements (#17094)

* Switch alpha to alpha1 and update publish maven to support qualifier and meet requirements

Signed-off-by: Peter Zhu

* Enhance logics

Signed-off-by: Peter Zhu

---------

Signed-off-by: Peter Zhu
---
 .github/workflows/publish-maven-snapshots.yml | 4 ++--
 buildSrc/version.properties                   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/publish-maven-snapshots.yml b/.github/workflows/publish-maven-snapshots.yml
index bdf4b6a315186..14642f251f25b 100644
--- a/.github/workflows/publish-maven-snapshots.yml
+++ b/.github/workflows/publish-maven-snapshots.yml
@@ -19,7 +19,7 @@ jobs:
     steps:
       - uses: actions/checkout@v4

-      - name: Set up JDK 17
+      - name: Set up JDK 21
         uses: actions/setup-java@v4
         with:
           distribution: temurin
@@ -37,4 +37,4 @@ jobs:
          export SONATYPE_PASSWORD=$(aws secretsmanager get-secret-value --secret-id maven-snapshots-password --query SecretString --output text)
          echo "::add-mask::$SONATYPE_USERNAME"
          echo "::add-mask::$SONATYPE_PASSWORD"
-         ./gradlew publishNebulaPublicationToSnapshotsRepository
+         ./gradlew publishNebulaPublicationToSnapshotsRepository -Dbuild.version_qualifier=alpha1

diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index cc6ca32167480..61f4fbbf10b1d 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,2 +1,2 @@
 # Please use ../gradle/libs.versions.toml for dependency management
-opensearch = 3.0.0-alpha
+opensearch = 3.0.0

From e15f71265e1da74d89025373ec96980fffb93640 Mon Sep 17 00:00:00 2001
From: Junwei Dai <59641585+junweid62@users.noreply.github.com>
Date: Wed, 22 Jan 2025 18:08:30 -0800
Subject: [PATCH 02/48] Add verbose pipeline parameter to output each
 processor's execution details (#16843)

* Add verbose pipeline parameter to output each processor's execution details

Signed-off-by: Junwei Dai

* add change log

Signed-off-by: Junwei Dai

# Conflicts:
#	CHANGELOG.md

* Refactor ProcessorExecutionDetail to improve field handling

Signed-off-by: Junwei Dai

* Fix ITtest Fail

Signed-off-by: Junwei Dai

* Add more unit test

Signed-off-by: Junwei Dai

* resolve comments

Signed-off-by: Junwei Dai

* 1.add todo to change version.current 2.use exist xcontentUtil to read 3.move processor excution key to ProcessorExecutionDetail

Signed-off-by: Junwei Dai

* refactor code

Signed-off-by: Junwei Dai

* refactor code based on the comment

Signed-off-by: Junwei Dai

* refactor code based on the comment

Signed-off-by: Junwei Dai

* 1.add javadoc 2.refactor error message

Signed-off-by: Junwei Dai

* change error message

Signed-off-by: Junwei Dai

* 1.Added wrappers for tracking execution details of search processors. 2.Removed redundant logic for cleaner and simpler implementation.

Signed-off-by: Junwei Dai

* change version to 3.0.0

Signed-off-by: Junwei Dai

* fix unit test

Signed-off-by: Junwei Dai

* fix unit test

Signed-off-by: Junwei Dai

* addressed comments 1. removed unnecessary log

Signed-off-by: Junwei Dai

* addressed comments

Signed-off-by: Junwei Dai

* revise comment to opensearch.api

Signed-off-by: Junwei Dai

* removed unused logger and comment

Signed-off-by: Junwei Dai

* removed unnecessary try catch block.
add more comment

Signed-off-by: Junwei Dai

* addressed comments

Signed-off-by: Junwei Dai

* remove wrong unit test

Signed-off-by: Junwei Dai

---------

Signed-off-by: Junwei Dai
Co-authored-by: Junwei Dai
---
 CHANGELOG.md                                  |   1 +
 .../action/search/SearchResponse.java         |  11 +-
 .../action/search/SearchResponseSections.java |  28 +-
 .../rest/action/search/RestSearchAction.java  |   3 +
 .../org/opensearch/search/SearchHits.java     |  17 +
 .../search/builder/SearchSourceBuilder.java   |  42 ++-
 .../internal/InternalSearchResponse.java      |  43 ++-
 .../opensearch/search/pipeline/Pipeline.java  |  20 +-
 .../pipeline/PipelineProcessingContext.java   |  22 ++
 .../search/pipeline/PipelinedRequest.java     |  12 +-
 .../pipeline/ProcessorExecutionDetail.java    | 299 ++++++++++++++++++
 .../pipeline/SearchPipelineService.java       |   4 +-
 ...TrackingSearchRequestProcessorWrapper.java |  83 +++++
 ...rackingSearchResponseProcessorWrapper.java |  97 ++++++
 .../action/search/SearchResponseTests.java    |  44 ++-
 .../search/GenericSearchExtBuilderTests.java  |   4 +-
 .../builder/SearchSourceBuilderTests.java     |  24 ++
 .../ProcessorExecutionDetailTests.java        | 187 +++++++++++
 .../pipeline/SearchPipelineServiceTests.java  | 202 +++++++++++-
 ...ingSearchRequestProcessorWrapperTests.java |  60 ++++
 ...ngSearchResponseProcessorWrapperTests.java |  75 +++++
 21 files changed, 1251 insertions(+), 27 deletions(-)
 create mode 100644 server/src/main/java/org/opensearch/search/pipeline/ProcessorExecutionDetail.java
 create mode 100644 server/src/main/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapper.java
 create mode 100644 server/src/main/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapper.java
 create mode 100644 server/src/test/java/org/opensearch/search/pipeline/ProcessorExecutionDetailTests.java
 create mode 100644 server/src/test/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapperTests.java
 create mode 100644 server/src/test/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapperTests.java

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4bc7a246fd2b1..499405ac508e4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/))
 - Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923))
 - Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)).
+- Add `verbose_pipeline` parameter to output each processor's execution details ([#16843](https://github.com/opensearch-project/OpenSearch/pull/16843)).
- Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678)) - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponse.java b/server/src/main/java/org/opensearch/action/search/SearchResponse.java index 899c71e91e3ab..0d55fbf2e7f88 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponse.java @@ -59,6 +59,7 @@ import org.opensearch.search.aggregations.Aggregations; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.pipeline.ProcessorExecutionDetail; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.suggest.Suggest; @@ -73,6 +74,7 @@ import java.util.function.Supplier; import static org.opensearch.action.search.SearchResponseSections.EXT_FIELD; +import static org.opensearch.action.search.SearchResponseSections.PROCESSOR_RESULT_FIELD; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; /** @@ -394,6 +396,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE List failures = new ArrayList<>(); Clusters clusters = Clusters.EMPTY; List extBuilders = new ArrayList<>(); + List processorResult = new ArrayList<>(); for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) { if (token == Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -517,6 +520,11 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE extBuilders.add(searchExtBuilder); } } + } else if (PROCESSOR_RESULT_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + while ((token = parser.nextToken()) != Token.END_ARRAY) { + ProcessorExecutionDetail detail = ProcessorExecutionDetail.fromXContent(parser); + processorResult.add(detail); + } } else { parser.skipChildren(); } @@ -530,7 +538,8 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE terminatedEarly, profile, numReducePhases, - extBuilders + extBuilders, + processorResult ); return new SearchResponse( searchResponseSections, diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index bca2c8a52b691..5eb305d91ee04 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -40,6 +40,7 @@ import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.Aggregations; +import org.opensearch.search.pipeline.ProcessorExecutionDetail; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.suggest.Suggest; @@ -65,7 +66,7 @@ public class SearchResponseSections implements ToXContentFragment { public static final ParseField EXT_FIELD = 
new ParseField("ext"); - + public static final ParseField PROCESSOR_RESULT_FIELD = new ParseField("processor_results"); protected final SearchHits hits; protected final Aggregations aggregations; protected final Suggest suggest; @@ -74,6 +75,7 @@ public class SearchResponseSections implements ToXContentFragment { protected final Boolean terminatedEarly; protected final int numReducePhases; protected final List searchExtBuilders = new ArrayList<>(); + protected final List processorResult = new ArrayList<>(); public SearchResponseSections( SearchHits hits, @@ -84,7 +86,17 @@ public SearchResponseSections( SearchProfileShardResults profileResults, int numReducePhases ) { - this(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, Collections.emptyList()); + this( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + Collections.emptyList(), + Collections.emptyList() + ); } public SearchResponseSections( @@ -95,7 +107,8 @@ public SearchResponseSections( Boolean terminatedEarly, SearchProfileShardResults profileResults, int numReducePhases, - List searchExtBuilders + List searchExtBuilders, + List processorResult ) { this.hits = hits; this.aggregations = aggregations; @@ -104,6 +117,7 @@ public SearchResponseSections( this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; this.numReducePhases = numReducePhases; + this.processorResult.addAll(processorResult); this.searchExtBuilders.addAll(Objects.requireNonNull(searchExtBuilders, "searchExtBuilders must not be null")); } @@ -166,6 +180,10 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) } builder.endObject(); } + + if (!processorResult.isEmpty()) { + builder.field(PROCESSOR_RESULT_FIELD.getPreferredName(), processorResult); + } return builder; } @@ -173,6 +191,10 @@ public List getSearchExtBuilders() { return Collections.unmodifiableList(this.searchExtBuilders); } + public List getProcessorResult() { + return processorResult; + } + protected void writeTo(StreamOutput out) throws IOException { throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 05465e32631fd..8e2fa8246ac1b 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -256,6 +256,9 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil if (request.hasParam("timeout")) { searchSourceBuilder.timeout(request.paramAsTime("timeout", null)); } + if (request.hasParam("verbose_pipeline")) { + searchSourceBuilder.verbosePipeline(request.paramAsBoolean("verbose_pipeline", false)); + } if (request.hasParam("terminate_after")) { int terminateAfter = request.paramAsInt("terminate_after", SearchContext.DEFAULT_TERMINATE_AFTER); if (terminateAfter < 0) { diff --git a/server/src/main/java/org/opensearch/search/SearchHits.java b/server/src/main/java/org/opensearch/search/SearchHits.java index 963ce82e636cf..4735180ecad36 100644 --- a/server/src/main/java/org/opensearch/search/SearchHits.java +++ b/server/src/main/java/org/opensearch/search/SearchHits.java @@ -37,6 +37,7 @@ import org.apache.lucene.search.TotalHits.Relation; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; +import 
org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.Lucene; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -166,6 +167,22 @@ public SearchHit[] getHits() { return this.hits; } + /** + * Creates a deep copy of this SearchHits instance. + * + * @return a deep copy of the current SearchHits object + * @throws IOException if an I/O exception occurs during serialization or deserialization + */ + public SearchHits deepCopy() throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + this.writeTo(out); + + try (StreamInput in = out.bytes().streamInput()) { + return new SearchHits(in); + } + } + } + /** * Return the hit as the provided position. */ diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 9c438401b9fbe..fb21eaff5f857 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -136,6 +136,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R public static final ParseField SLICE = new ParseField("slice"); public static final ParseField POINT_IN_TIME = new ParseField("pit"); public static final ParseField SEARCH_PIPELINE = new ParseField("search_pipeline"); + public static final ParseField VERBOSE_SEARCH_PIPELINE = new ParseField("verbose_pipeline"); public static SearchSourceBuilder fromXContent(XContentParser parser) throws IOException { return fromXContent(parser, true); @@ -226,6 +227,8 @@ public static HighlightBuilder highlight() { private String searchPipeline; + private boolean verbosePipeline = false; + /** * Constructs a new search source builder. */ @@ -302,6 +305,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_18_0)) { searchPipeline = in.readOptionalString(); } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + verbosePipeline = in.readBoolean(); + } } @Override @@ -385,6 +391,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_18_0)) { out.writeOptionalString(searchPipeline); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(verbosePipeline); + } } /** @@ -1142,6 +1151,26 @@ public SearchSourceBuilder pipeline(String searchPipeline) { return this; } + /** + * Enables or disables verbose mode for the search pipeline. + * + * When verbose mode is enabled, detailed information about each processor + * in the search pipeline is included in the search response. This includes + * the processor name, execution status, input, output, and time taken for processing. + * + * This parameter is primarily intended for debugging purposes, allowing users + * to track how data flows and transforms through the search pipeline. + * + */ + public SearchSourceBuilder verbosePipeline(Boolean verbosePipeline) { + this.verbosePipeline = verbosePipeline; + return this; + } + + public Boolean verbosePipeline() { + return verbosePipeline; + } + /** * Rewrites this search source builder into its primitive form. e.g. by * rewriting the QueryBuilder. 
If the builder did not change the identity @@ -1240,6 +1269,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.derivedFieldsObject = derivedFieldsObject; rewrittenBuilder.derivedFields = derivedFields; rewrittenBuilder.searchPipeline = searchPipeline; + rewrittenBuilder.verbosePipeline = verbosePipeline; return rewrittenBuilder; } @@ -1309,6 +1339,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th profile = parser.booleanValue(); } else if (SEARCH_PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { searchPipeline = parser.text(); + } else if (VERBOSE_SEARCH_PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { + verbosePipeline = parser.booleanValue(); } else { throw new ParsingException( parser.getTokenLocation(), @@ -1642,6 +1674,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(SEARCH_PIPELINE.getPreferredName(), searchPipeline); } + if (verbosePipeline) { + builder.field(VERBOSE_SEARCH_PIPELINE.getPreferredName(), verbosePipeline); + } + return builder; } @@ -1920,7 +1956,8 @@ public int hashCode() { pointInTimeBuilder, derivedFieldsObject, derivedFields, - searchPipeline + searchPipeline, + verbosePipeline ); } @@ -1966,7 +2003,8 @@ public boolean equals(Object obj) { && Objects.equals(pointInTimeBuilder, other.pointInTimeBuilder) && Objects.equals(derivedFieldsObject, other.derivedFieldsObject) && Objects.equals(derivedFields, other.derivedFields) - && Objects.equals(searchPipeline, other.searchPipeline); + && Objects.equals(searchPipeline, other.searchPipeline) + && Objects.equals(verbosePipeline, other.verbosePipeline); } @Override diff --git a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java index c9d7b0084c1e1..c014cd2577662 100644 --- a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java +++ b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java @@ -42,6 +42,7 @@ import org.opensearch.search.SearchExtBuilder; import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.pipeline.ProcessorExecutionDetail; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.suggest.Suggest; @@ -73,7 +74,17 @@ public InternalSearchResponse( Boolean terminatedEarly, int numReducePhases ) { - this(hits, aggregations, suggest, profileResults, timedOut, terminatedEarly, numReducePhases, Collections.emptyList()); + this( + hits, + aggregations, + suggest, + profileResults, + timedOut, + terminatedEarly, + numReducePhases, + Collections.emptyList(), + Collections.emptyList() + ); } public InternalSearchResponse( @@ -84,9 +95,20 @@ public InternalSearchResponse( boolean timedOut, Boolean terminatedEarly, int numReducePhases, - List searchExtBuilderList + List searchExtBuilderList, + List processorResult ) { - super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases, searchExtBuilderList); + super( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + searchExtBuilderList, + processorResult + ); } public InternalSearchResponse(StreamInput in) throws IOException { @@ -98,7 +120,8 @@ public InternalSearchResponse(StreamInput in) throws IOException { in.readOptionalBoolean(), 
in.readOptionalWriteable(SearchProfileShardResults::new), in.readVInt(), - readSearchExtBuildersOnOrAfter(in) + readSearchExtBuildersOnOrAfter(in), + readProcessorResultOnOrAfter(in) ); } @@ -112,6 +135,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(profileResults); out.writeVInt(numReducePhases); writeSearchExtBuildersOnOrAfter(out, searchExtBuilders); + writeProcessorResultOnOrAfter(out, processorResult); } private static List readSearchExtBuildersOnOrAfter(StreamInput in) throws IOException { @@ -123,4 +147,15 @@ private static void writeSearchExtBuildersOnOrAfter(StreamOutput out, List readProcessorResultOnOrAfter(StreamInput in) throws IOException { + return (in.getVersion().onOrAfter(Version.V_3_0_0)) ? in.readList(ProcessorExecutionDetail::new) : Collections.emptyList(); + } + + private static void writeProcessorResultOnOrAfter(StreamOutput out, List processorResult) throws IOException { + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeList(processorResult); + } + } + } diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index c88dfb2060393..e2d00834bd57d 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -144,7 +144,11 @@ void transformRequest(SearchRequest request, ActionListener reque ActionListener currentListener = finalListener; for (int i = searchRequestProcessors.size() - 1; i >= 0; i--) { final ActionListener nextListener = currentListener; - SearchRequestProcessor processor = searchRequestProcessors.get(i); + // Conditionally wrap the current processor with a TrackingSearchRequestProcessorWrapper + // if verbosePipeline mode is enabled. This allows detailed execution tracking for debugging purposes. + final SearchRequestProcessor processor = request.source().verbosePipeline() + ? new TrackingSearchRequestProcessorWrapper(searchRequestProcessors.get(i)) + : searchRequestProcessors.get(i); currentListener = ActionListener.wrap(r -> { long start = relativeTimeSupplier.getAsLong(); beforeRequestProcessor(processor); @@ -156,7 +160,9 @@ void transformRequest(SearchRequest request, ActionListener reque long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); afterRequestProcessor(processor, took); onRequestProcessorFailed(processor); - if (processor.isIgnoreFailure()) { + // When verbosePipeline is enabled, all processor failures are ignored to ensure the execution chain continues without + // interruption.TrackingSearchResponseProcessorWrapper will log all errors in detail for debugging purposes + if (processor.isIgnoreFailure() || r.source().verbosePipeline()) { logger.warn( "The exception from request processor [" + processor.getType() @@ -201,7 +207,6 @@ ActionListener transformResponseListener( PipelineProcessingContext requestContext ) { if (searchResponseProcessors.isEmpty()) { - // No response transformation necessary return responseListener; } @@ -222,8 +227,9 @@ ActionListener transformResponseListener( for (int i = searchResponseProcessors.size() - 1; i >= 0; i--) { final ActionListener currentFinalListener = responseListener; - final SearchResponseProcessor processor = searchResponseProcessors.get(i); - + final SearchResponseProcessor processor = request.source().verbosePipeline() + ? 
new TrackingSearchResponseProcessorWrapper(searchResponseProcessors.get(i)) + : searchResponseProcessors.get(i); responseListener = ActionListener.wrap(r -> { beforeResponseProcessor(processor); final long start = relativeTimeSupplier.getAsLong(); @@ -235,7 +241,9 @@ ActionListener transformResponseListener( onResponseProcessorFailed(processor); long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); afterResponseProcessor(processor, took); - if (processor.isIgnoreFailure()) { + // When verbosePipeline is enabled, all processor failures are ignored to ensure the execution chain continues without + // interruption.TrackingSearchResponseProcessorWrapper will log all errors in detail for debugging purposes + if (processor.isIgnoreFailure() || request.source().verbosePipeline()) { logger.warn( "The exception from response processor [" + processor.getType() diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java index a1f2b8b99d958..7e86c30ddbbd9 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java @@ -8,7 +8,10 @@ package org.opensearch.search.pipeline; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; /** @@ -16,6 +19,7 @@ */ public class PipelineProcessingContext { private final Map attributes = new HashMap<>(); + private final List processorExecutionDetails = new ArrayList<>(); /** * Set a generic attribute in the state for this request. Overwrites any existing value. @@ -35,4 +39,22 @@ public void setAttribute(String name, Object value) { public Object getAttribute(String name) { return attributes.get(name); } + + /** + * Add a ProcessorExecutionDetail to the list of execution details. + * + * @param detail the ProcessorExecutionDetail to add + */ + public void addProcessorExecutionDetail(ProcessorExecutionDetail detail) { + processorExecutionDetails.add(detail); + } + + /** + * Get all ProcessorExecutionDetails recorded in this context. + * + * @return a list of ProcessorExecutionDetails + */ + public List getProcessorExecutionDetails() { + return Collections.unmodifiableList(processorExecutionDetails); + } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java index d550fbb768133..f5ce94946dd32 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java @@ -15,6 +15,8 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.search.SearchPhaseResult; +import java.util.List; + /** * Groups a search pipeline based on a request and the request after being transformed by the pipeline. 
* @@ -35,7 +37,15 @@ public void transformRequest(ActionListener requestListener) { } public ActionListener transformResponseListener(ActionListener responseListener) { - return pipeline.transformResponseListener(this, responseListener, requestContext); + return pipeline.transformResponseListener(this, ActionListener.wrap(response -> { + // Extract processor execution details + List details = requestContext.getProcessorExecutionDetails(); + // Add details to the response's InternalResponse if available + if (!details.isEmpty() && response.getInternalResponse() != null) { + response.getInternalResponse().getProcessorResult().addAll(details); + } + responseListener.onResponse(response); + }, responseListener::onFailure), requestContext); } public void transformSearchPhaseResults( diff --git a/server/src/main/java/org/opensearch/search/pipeline/ProcessorExecutionDetail.java b/server/src/main/java/org/opensearch/search/pipeline/ProcessorExecutionDetail.java new file mode 100644 index 0000000000000..61c627c3dc54c --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/ProcessorExecutionDetail.java @@ -0,0 +1,299 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.XContentUtils; +import org.opensearch.core.ParseField; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContentObject; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * Detailed information about a processor execution in a search pipeline. 
+ * + * @opensearch.api + */ +@PublicApi(since = "2.19.0") +public class ProcessorExecutionDetail implements Writeable, ToXContentObject { + + private final String processorName; + private long durationMillis; + private Object inputData; + private Object outputData; + private ProcessorStatus status; + private String errorMessage; + private String tag; + private static final ParseField PROCESSOR_NAME_FIELD = new ParseField("processor_name"); + private static final ParseField DURATION_MILLIS_FIELD = new ParseField("duration_millis"); + private static final ParseField INPUT_DATA_FIELD = new ParseField("input_data"); + private static final ParseField OUTPUT_DATA_FIELD = new ParseField("output_data"); + private static final ParseField STATUS_FIELD = new ParseField("status"); + private static final ParseField ERROR_MESSAGE_FIELD = new ParseField("error"); + private static final ParseField TAG_FIELD = new ParseField("tag"); + // Key for processor execution details + public static final String PROCESSOR_EXECUTION_DETAILS_KEY = "processorExecutionDetails"; + + /** + * Constructor for ProcessorExecutionDetail + */ + public ProcessorExecutionDetail( + String processorName, + long durationMillis, + Object inputData, + Object outputData, + ProcessorStatus status, + String errorMessage, + String tag + ) { + this.processorName = processorName; + this.durationMillis = durationMillis; + this.inputData = inputData; + this.outputData = outputData; + this.status = status; + this.errorMessage = errorMessage; + this.tag = tag; + } + + public ProcessorExecutionDetail(String processorName) { + this(processorName, 0, null, null, ProcessorStatus.SUCCESS, null, null); + } + + public ProcessorExecutionDetail(String processorName, String tag) { + this(processorName, 0, null, null, ProcessorStatus.SUCCESS, null, tag); + } + + public ProcessorExecutionDetail(StreamInput in) throws IOException { + this.processorName = in.readString(); + this.durationMillis = in.readLong(); + this.inputData = in.readGenericValue(); + this.outputData = in.readGenericValue(); + this.status = in.readEnum(ProcessorStatus.class); + this.errorMessage = in.readString(); + this.tag = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(processorName); + out.writeLong(durationMillis); + out.writeGenericValue(inputData); + out.writeGenericValue(outputData); + out.writeEnum(status); + out.writeString(errorMessage); + out.writeString(tag); + } + + public String getProcessorName() { + return processorName; + } + + public long getDurationMillis() { + return durationMillis; + } + + public Object getInputData() { + return inputData; + + } + + public Object getOutputData() { + return outputData; + } + + public void markProcessorAsFailed(ProcessorStatus status, String errorMessage) { + this.status = status; + this.errorMessage = errorMessage; + } + + public ProcessorStatus getStatus() { + return status; + } + + /** + * Adds or updates the input data for this processor execution detail. + * + * @param inputData the new input data + */ + public void addInput(Object inputData) { + this.inputData = inputData; + } + + /** + * Adds or updates the output data for this processor execution detail. + * + * @param outputData the new output data + */ + public void addOutput(Object outputData) { + this.outputData = outputData; + } + + /** + * Adds or updates the duration of the processor execution. 
+     *
+     * @param durationMillis the new duration in milliseconds
+     */
+    public void addTook(long durationMillis) {
+        this.durationMillis = durationMillis;
+    }
+
+    /**
+     * Serializes the processor execution detail into XContent.
+     * Includes the error message only if the processor has failed.
+     */
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(PROCESSOR_NAME_FIELD.getPreferredName(), processorName);
+        // tag is optional when setting up processor
+        if (tag != null) {
+            builder.field(TAG_FIELD.getPreferredName(), tag);
+        }
+        builder.field(DURATION_MILLIS_FIELD.getPreferredName(), durationMillis);
+        builder.field(STATUS_FIELD.getPreferredName(), status.name().toLowerCase(Locale.ROOT));
+        if (status == ProcessorStatus.FAIL) {
+            builder.field(ERROR_MESSAGE_FIELD.getPreferredName(), errorMessage);
+        }
+        addFieldToXContent(builder, INPUT_DATA_FIELD.getPreferredName(), inputData, params);
+        addFieldToXContent(builder, OUTPUT_DATA_FIELD.getPreferredName(), outputData, params);
+
+        builder.endObject();
+        return builder;
+    }
+
+    private void addFieldToXContent(XContentBuilder builder, String fieldName, Object data, Params params) throws IOException {
+        if (data == null) {
+            builder.nullField(fieldName);
+            return;
+        }
+
+        if (data instanceof List) {
+            builder.startArray(fieldName);
+            for (Object item : (List) data) {
+                writeItemToXContent(builder, item, params);
+            }
+            builder.endArray();
+        } else if (data instanceof Map) {
+            builder.startObject(fieldName);
+            for (Map.Entry entry : ((Map) data).entrySet()) {
+                addFieldToXContent(builder, entry.getKey().toString(), entry.getValue(), params);
+            }
+            builder.endObject();
+        } else if (data instanceof ToXContentObject) {
+            builder.field(fieldName);
+            ((ToXContentObject) data).toXContent(builder, params);
+        } else if (data instanceof String) {
+            // If the data is a String, attempt to parse it as JSON
+            String jsonString = (String) data;
+            try {
+                // check if its json string
+                try (
+                    XContentParser parser = XContentType.JSON.xContent()
+                        .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, jsonString)
+                ) {
+                    Map parsedMap = parser.map();
+
+                    builder.startObject(fieldName);
+                    for (Map.Entry entry : parsedMap.entrySet()) {
+                        addFieldToXContent(builder, entry.getKey(), entry.getValue(), params);
+                    }
+                    builder.endObject();
+                }
+            } catch (IOException e) {
+                // If parsing fails, write the string as a plain field
+                builder.field(fieldName, jsonString);
+            }
+        } else {
+            builder.field(fieldName, data);
+        }
+    }
+
+    private void writeItemToXContent(XContentBuilder builder, Object item, Params params) throws IOException {
+        if (item instanceof ToXContentObject) {
+            ((ToXContentObject) item).toXContent(builder, params);
+        } else {
+            builder.value(item);
+        }
+    }
+
+    public static ProcessorExecutionDetail fromXContent(XContentParser parser) throws IOException {
+        String processorName = null;
+        long durationMillis = 0;
+        Object inputData = null;
+        Object outputData = null;
+        ProcessorStatus status = null;
+        String errorMessage = null;
+        String tag = null;
+        if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
+            parser.nextToken();
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
+        }
+        while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
+            String fieldName = parser.currentName();
+            parser.nextToken();
+
+            if (PROCESSOR_NAME_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                processorName = parser.text();
+            } else if (TAG_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                tag = parser.text();
+            } else if (DURATION_MILLIS_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                durationMillis = parser.longValue();
+            } else if (STATUS_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                status = ProcessorStatus.valueOf(parser.text().toUpperCase(Locale.ROOT));
+            } else if (ERROR_MESSAGE_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                errorMessage = parser.text();
+            } else if (INPUT_DATA_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                inputData = XContentUtils.readValue(parser, parser.currentToken());
+            } else if (OUTPUT_DATA_FIELD.match(fieldName, parser.getDeprecationHandler())) {
+                outputData = XContentUtils.readValue(parser, parser.currentToken());
+            } else {
+                parser.skipChildren();
+            }
+        }
+
+        if (processorName == null) {
+            throw new IllegalArgumentException("Processor name is required");
+        }
+
+        return new ProcessorExecutionDetail(processorName, durationMillis, inputData, outputData, status, errorMessage, tag);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(processorName, durationMillis, inputData, outputData, status, errorMessage, tag);
+    }
+
+    /**
+     * Represents the status of a processor in the search pipeline.
+     *
+     *

This enum is used to indicate whether a processor has executed successfully + * or encountered a failure during its execution. It helps in categorizing the + * execution result of processors within a pipeline. + * + */ + @PublicApi(since = "2.19.0") + public enum ProcessorStatus { + SUCCESS, + FAIL + } +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 012d6695c042b..27b837740c0ca 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -367,7 +367,6 @@ static ClusterState innerDelete(DeleteSearchPipelineRequest request, ClusterStat public PipelinedRequest resolvePipeline(SearchRequest searchRequest, IndexNameExpressionResolver indexNameExpressionResolver) { Pipeline pipeline = Pipeline.NO_OP_PIPELINE; - if (searchRequest.source() != null && searchRequest.source().searchPipelineSource() != null) { // Pipeline defined in search request (ad hoc pipeline). if (searchRequest.pipeline() != null) { @@ -426,6 +425,9 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest, IndexNameEx pipeline = pipelineHolder.pipeline; } } + if (searchRequest.source() != null && searchRequest.source().verbosePipeline() && pipeline.equals(Pipeline.NO_OP_PIPELINE)) { + throw new IllegalArgumentException("The 'verbose pipeline' option requires a search pipeline to be defined."); + } PipelineProcessingContext requestContext = new PipelineProcessingContext(); return new PipelinedRequest(pipeline, searchRequest, requestContext); } diff --git a/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapper.java b/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapper.java new file mode 100644 index 0000000000000..594b3b5e382db --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapper.java @@ -0,0 +1,83 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.action.ActionListener; + +/** + * Wrapper for SearchRequestProcessor to track execution details. + * + * @opensearch.internal + */ +public class TrackingSearchRequestProcessorWrapper implements SearchRequestProcessor { + + private final SearchRequestProcessor wrappedProcessor; + + /** + * Constructor for the wrapper. 
+ * + * @param wrappedProcessor the actual processor to be wrapped + */ + public TrackingSearchRequestProcessorWrapper(SearchRequestProcessor wrappedProcessor) { + this.wrappedProcessor = wrappedProcessor; + } + + @Override + public String getType() { + return wrappedProcessor.getType(); + } + + @Override + public String getTag() { + return wrappedProcessor.getTag(); + } + + @Override + public String getDescription() { + return wrappedProcessor.getDescription(); + } + + @Override + public boolean isIgnoreFailure() { + return wrappedProcessor.isIgnoreFailure(); + } + + @Override + public SearchRequest processRequest(SearchRequest request) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public SearchRequest processRequest(SearchRequest request, PipelineProcessingContext requestContext) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public void processRequestAsync( + SearchRequest request, + PipelineProcessingContext requestContext, + ActionListener requestListener + ) { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail(getType(), getTag()); + long start = System.nanoTime(); + detail.addInput(request.source().toString()); + wrappedProcessor.processRequestAsync(request, requestContext, ActionListener.wrap(result -> { + detail.addOutput(result.source().toString()); + long took = System.nanoTime() - start; + detail.addTook(took); + requestContext.addProcessorExecutionDetail(detail); + requestListener.onResponse(result); + }, e -> { + detail.markProcessorAsFailed(ProcessorExecutionDetail.ProcessorStatus.FAIL, e.getMessage()); + requestContext.addProcessorExecutionDetail(detail); + requestListener.onFailure(e); + })); + } +} diff --git a/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapper.java b/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapper.java new file mode 100644 index 0000000000000..51b0a5ebc7103 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapper.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Wrapper for SearchResponseProcessor to track execution details. + * + * @opensearch.internal + */ +public class TrackingSearchResponseProcessorWrapper implements SearchResponseProcessor { + + private final SearchResponseProcessor wrappedProcessor; + + /** + * Constructor for the wrapper. 
+ * + * @param wrappedProcessor the actual processor to be wrapped + */ + public TrackingSearchResponseProcessorWrapper(SearchResponseProcessor wrappedProcessor) { + if (wrappedProcessor == null) { + throw new IllegalArgumentException("Wrapped processor cannot be null."); + } + this.wrappedProcessor = wrappedProcessor; + } + + @Override + public String getType() { + return wrappedProcessor.getType(); + } + + @Override + public String getTag() { + return wrappedProcessor.getTag(); + } + + @Override + public String getDescription() { + return wrappedProcessor.getDescription(); + } + + @Override + public boolean isIgnoreFailure() { + return wrappedProcessor.isIgnoreFailure(); + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response) throws Exception { + throw new UnsupportedOperationException(); + } + + @Override + public SearchResponse processResponse(SearchRequest request, SearchResponse response, PipelineProcessingContext requestContext) { + throw new UnsupportedOperationException(); + } + + @Override + public void processResponseAsync( + SearchRequest request, + SearchResponse response, + PipelineProcessingContext requestContext, + ActionListener responseListener + ) { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail(getType(), getTag()); + long start = System.nanoTime(); + try { + detail.addInput(Arrays.asList(response.getHits().deepCopy().getHits())); + } catch (IOException e) { + responseListener.onFailure(e); + return; + } + wrappedProcessor.processResponseAsync(request, response, requestContext, ActionListener.wrap(result -> { + detail.addOutput(Arrays.asList(result.getHits().deepCopy().getHits())); + long took = System.nanoTime() - start; + detail.addTook(took); + requestContext.addProcessorExecutionDetail(detail); + responseListener.onResponse(result); + }, e -> { + detail.markProcessorAsFailed(ProcessorExecutionDetail.ProcessorStatus.FAIL, e.getMessage()); + requestContext.addProcessorExecutionDetail(detail); + responseListener.onFailure(e); + })); + } + +} diff --git a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java index f5a53be43f267..81a22b2a5aa39 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchResponseTests.java @@ -62,6 +62,7 @@ import org.opensearch.search.aggregations.AggregationsTests; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.pipeline.ProcessorExecutionDetail; import org.opensearch.search.profile.SearchProfileShardResults; import org.opensearch.search.profile.SearchProfileShardResultsTests; import org.opensearch.search.suggest.Suggest; @@ -76,6 +77,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.UUID; @@ -176,7 +178,8 @@ public SearchResponse createTestItem( timedOut, terminatedEarly, numReducePhases, - searchExtBuilders + searchExtBuilders, + Collections.emptyList() ); } else { internalSearchResponse = InternalSearchResponse.empty(); @@ -311,6 +314,26 @@ public void testToXContent() { hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; String dummyId = UUID.randomUUID().toString(); + List processorResults = List.of( + new ProcessorExecutionDetail( + "processor1", + 50, + List.of(1), + 
List.of(1), + ProcessorExecutionDetail.ProcessorStatus.SUCCESS, + null, + null + ), + new ProcessorExecutionDetail( + "processor2", + 30, + List.of(3), + List.of(3), + ProcessorExecutionDetail.ProcessorStatus.SUCCESS, + null, + null + ) + ); { SearchResponse response = new SearchResponse( new InternalSearchResponse( @@ -321,7 +344,8 @@ public void testToXContent() { false, null, 1, - List.of(new DummySearchExtBuilder(dummyId)) + List.of(new DummySearchExtBuilder(dummyId)), + processorResults ), null, 0, @@ -354,6 +378,22 @@ public void testToXContent() { { expectedString.append("{\"dummy\":\"" + dummyId + "\"}"); } + expectedString.append(",\"processor_results\":"); + expectedString.append("["); + for (int i = 0; i < processorResults.size(); i++) { + ProcessorExecutionDetail detail = processorResults.get(i); + expectedString.append("{"); + expectedString.append("\"processor_name\":\"").append(detail.getProcessorName()).append("\","); + expectedString.append("\"duration_millis\":").append(detail.getDurationMillis()).append(","); + expectedString.append("\"status\":\"").append(detail.getStatus().toString().toLowerCase(Locale.ROOT)).append("\","); + expectedString.append("\"input_data\":").append(detail.getInputData()).append(","); + expectedString.append("\"output_data\":").append(detail.getOutputData()); + expectedString.append("}"); + if (i < processorResults.size() - 1) { + expectedString.append(","); + } + } + expectedString.append("]"); } expectedString.append("}"); assertEquals(expectedString.toString(), Strings.toString(MediaTypeRegistry.JSON, response)); diff --git a/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java index cc003f2ca0e57..74ab8c22c6b57 100644 --- a/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/GenericSearchExtBuilderTests.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -251,7 +252,8 @@ public SearchResponse createTestItem( timedOut, terminatedEarly, numReducePhases, - searchExtBuilders + searchExtBuilders, + Collections.emptyList() ); } else { internalSearchResponse = InternalSearchResponse.empty(); diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index da8ccc9e121e0..90962a5c613f1 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -703,6 +703,30 @@ public void testParseFromAndSize() throws IOException { } } + public void testVerbosePipeline() throws IOException { + { + String restContent = "{ \"verbose_pipeline\": true }"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + assertTrue(searchSourceBuilder.verbosePipeline()); + } + } + { + String restContent = "{ \"verbose_pipeline\": false }"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + assertFalse(searchSourceBuilder.verbosePipeline()); + } + } + { + String restContent = "{ \"query\": { 
\"match_all\": {} } }"; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + assertFalse(searchSourceBuilder.verbosePipeline()); + } + } + } + private void assertIndicesBoostParseErrorMessage(String restContent, String expectedErrorMessage) throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { ParsingException e = expectThrows(ParsingException.class, () -> SearchSourceBuilder.fromXContent(parser)); diff --git a/server/src/test/java/org/opensearch/search/pipeline/ProcessorExecutionDetailTests.java b/server/src/test/java/org/opensearch/search/pipeline/ProcessorExecutionDetailTests.java new file mode 100644 index 0000000000000..0a5b62add6aa1 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pipeline/ProcessorExecutionDetailTests.java @@ -0,0 +1,187 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class ProcessorExecutionDetailTests extends OpenSearchTestCase { + + public void testSerializationRoundtrip() throws IOException { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail( + "testProcessor", + 123L, + Map.of("key", "value"), + List.of(1, 2, 3), + ProcessorExecutionDetail.ProcessorStatus.SUCCESS, + "", + "" + ); + ProcessorExecutionDetail deserialized; + try (BytesStreamOutput output = new BytesStreamOutput()) { + detail.writeTo(output); + try (StreamInput input = output.bytes().streamInput()) { + deserialized = new ProcessorExecutionDetail(input); + } + } + assertEquals("testProcessor", deserialized.getProcessorName()); + assertEquals(123L, deserialized.getDurationMillis()); + assertEquals(Map.of("key", "value"), deserialized.getInputData()); + assertEquals(List.of(1, 2, 3), deserialized.getOutputData()); + } + + public void testAddMethods() { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail("testProcessor"); + detail.addTook(456L); + detail.addInput(Map.of("newKey", "newValue")); + detail.addOutput(List.of(4, 5, 6)); + assertEquals(456L, detail.getDurationMillis()); + assertEquals(Map.of("newKey", "newValue"), detail.getInputData()); + assertEquals(List.of(4, 5, 6), detail.getOutputData()); + } + + public void testToXContent() throws IOException { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail( + "testProcessor", + 123L, + Map.of("key1", "value1"), + List.of(1, 2, 3), + ProcessorExecutionDetail.ProcessorStatus.SUCCESS, + "", + null + ); + + XContentBuilder actualBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + 
detail.toXContent(actualBuilder, ToXContent.EMPTY_PARAMS); + + String expected = "{" + + " \"processor_name\": \"testProcessor\"," + + " \"duration_millis\": 123," + + " \"status\": \"success\"," + + " \"input_data\": {\"key1\": \"value1\"}," + + " \"output_data\": [1, 2, 3]" + + "}"; + + XContentParser expectedParser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + expected + ); + XContentBuilder expectedBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + expectedBuilder.generator().copyCurrentStructure(expectedParser); + + assertEquals( + XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) MediaTypeRegistry.JSON), + XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) MediaTypeRegistry.JSON) + ); + } + + public void testToXContentWithProcessorError() throws IOException { + ProcessorExecutionDetail detail = new ProcessorExecutionDetail( + "testProcessor", + 123L, + Map.of("key1", "value1"), + List.of(1, 2, 3), + ProcessorExecutionDetail.ProcessorStatus.FAIL, + "processor 1 fail", + "123" + ); + + XContentBuilder actualBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + detail.toXContent(actualBuilder, ToXContent.EMPTY_PARAMS); + + String expected = "{" + + " \"processor_name\": \"testProcessor\"," + + " \"tag\": \"123\"," + + " \"duration_millis\": 123," + + " \"status\": \"fail\"," + + " \"error\": \"processor 1 fail\"," + + " \"input_data\": {\"key1\": \"value1\"}," + + " \"output_data\": [1, 2, 3]" + + "}"; + + XContentParser expectedParser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + expected + ); + XContentBuilder expectedBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + expectedBuilder.generator().copyCurrentStructure(expectedParser); + + assertEquals( + XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) MediaTypeRegistry.JSON), + XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) MediaTypeRegistry.JSON) + ); + } + + public void testFromXContent() throws IOException { + String json = "{" + + " \"processor_name\": \"testProcessor\"," + + " \"duration_millis\": 123," + + " \"input_data\": {\"key1\": \"value1\"}," + + " \"output_data\": [1, 2, 3]" + + "}"; + + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + json + ) + ) { + ProcessorExecutionDetail detail = ProcessorExecutionDetail.fromXContent(parser); + + assertEquals("testProcessor", detail.getProcessorName()); + assertEquals(123L, detail.getDurationMillis()); + assertEquals(Map.of("key1", "value1"), detail.getInputData()); + assertEquals(List.of(1, 2, 3), detail.getOutputData()); + } + } + + public void testFromXContentWithPRocessorError() throws IOException { + String json = "{" + + " \"processor_name\": \"testProcessor\"," + + " \"duration_millis\": 123," + + " \"status\": \"fail\"," + + " \"error\": \"processor 1 fail\"," + + " \"input_data\": {\"key1\": \"value1\"}," + + " \"output_data\": [1, 2, 3]" + + "}"; + + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + json + ) + ) { + ProcessorExecutionDetail detail = ProcessorExecutionDetail.fromXContent(parser); + + assertEquals("testProcessor", 
detail.getProcessorName()); + assertEquals(123L, detail.getDurationMillis()); + assertEquals(Map.of("key1", "value1"), detail.getInputData()); + assertEquals(List.of(1, 2, 3), detail.getOutputData()); + assertEquals(ProcessorExecutionDetail.ProcessorStatus.FAIL, detail.getStatus()); + } + } +} diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index b52205996f34b..9668bf57db40f 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -669,12 +669,14 @@ public void testTransformResponse() throws Exception { // First try without specifying a pipeline, which should be a no-op. SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(createDefaultSearchSourceBuilder()); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); SearchResponse notTransformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertSame(searchResponse, notTransformedResponse); // Now apply a pipeline searchRequest = new SearchRequest().pipeline("p1"); + searchRequest.source(createDefaultSearchSourceBuilder()); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); assertEquals(size, transformedResponse.getHits().getHits().length); @@ -762,6 +764,7 @@ public void testTransformSearchPhase() { // First try without specifying a pipeline, which should be a no-op. SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(createDefaultSearchSourceBuilder()); PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); AtomicArray notTransformedSearchPhaseResults = searchPhaseResults.getAtomicArray(); pipelinedRequest.transformSearchPhaseResults( @@ -774,6 +777,7 @@ public void testTransformSearchPhase() { // Now set the pipeline as p1 searchRequest = new SearchRequest().pipeline("p1"); + searchRequest.source(createDefaultSearchSourceBuilder()); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); pipelinedRequest.transformSearchPhaseResults( @@ -792,6 +796,7 @@ public void testTransformSearchPhase() { // Check Processor doesn't run for between other phases searchRequest = new SearchRequest().pipeline("p1"); + searchRequest.source(createDefaultSearchSourceBuilder()); pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); AtomicArray notTransformedSearchPhaseResult = searchPhaseResults.getAtomicArray(); pipelinedRequest.transformSearchPhaseResults( @@ -1105,7 +1110,7 @@ public void testExceptionOnResponseProcessing() throws Exception { PipelinedRequest pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); - SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); + SearchResponse response = createDefaultSearchResponse(); // Exception thrown when processing response expectThrows(SearchPipelineProcessingException.class, () -> syncTransformResponse(pipelinedRequest, response)); } @@ -1169,7 +1174,7 @@ public void testCatchExceptionOnResponseProcessing() throws Exception { PipelinedRequest 
pipelinedRequest = searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver); - SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); + SearchResponse response = createDefaultSearchResponse(); // Caught Exception thrown when processing response and produced warn level logging message try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(Pipeline.class))) { @@ -1209,7 +1214,8 @@ public void testStats() throws Exception { SearchPipelineService searchPipelineService = getSearchPipelineService(requestProcessors, responseProcessors); SearchRequest request = new SearchRequest(); - SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); + request.source(createDefaultSearchSourceBuilder()); + SearchResponse response = createDefaultSearchResponse(); syncExecutePipeline( searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline"), indexNameExpressionResolver), @@ -1307,7 +1313,8 @@ public void testStatsEnabledIgnoreFailure() throws Exception { ); SearchRequest request = new SearchRequest(); - SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null); + request.source(createDefaultSearchSourceBuilder()); + SearchResponse response = createDefaultSearchResponse(); syncExecutePipeline( searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline"), indexNameExpressionResolver), @@ -1578,9 +1585,12 @@ public void testStatefulProcessors() throws Exception { .build(); searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); - PipelinedRequest request = searchPipelineService.resolvePipeline(new SearchRequest().pipeline("p1"), indexNameExpressionResolver); + PipelinedRequest request = searchPipelineService.resolvePipeline( + new SearchRequest().source(createDefaultSearchSourceBuilder()).pipeline("p1"), + indexNameExpressionResolver + ); assertNull(contextHolder.get()); - syncExecutePipeline(request, new SearchResponse(null, null, 0, 0, 0, 0, null, null)); + syncExecutePipeline(request, createDefaultSearchResponse()); assertNotNull(contextHolder.get()); assertEquals("b", contextHolder.get()); } @@ -1757,4 +1767,184 @@ public void testInvalidIndexResolveIndexDefaultPipeline() throws Exception { assertEquals(5, pipelinedRequest.source().size()); } + public void testVerbosePipelineExecution() throws Exception { + SearchPipelineService searchPipelineService = createWithProcessors(); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "verbose_pipeline", + new PipelineConfiguration( + "verbose_pipeline", + new BytesArray( + "{" + + "\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ]," + + "\"response_processors\": [ { \"fixed_score\": { \"score\": 5.0 } } ]" + + "}" + ), + MediaTypeRegistry.JSON + ) + ) + ); + + ClusterState initialState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState updatedState = ClusterState.builder(initialState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + + searchPipelineService.applyClusterState(new ClusterChangedEvent("clusterStateUpdated", updatedState, initialState)); + + SearchRequest searchRequest = new SearchRequest().source(SearchSourceBuilder.searchSource().size(10)).pipeline("verbose_pipeline"); + searchRequest.source().verbosePipeline(true); + + PipelinedRequest pipelinedRequest = syncTransformRequest( + 
searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver) + ); + + SearchResponseSections sections = new SearchResponseSections( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0.0f), + null, + null, + false, + null, + null, + 1, + List.of(), + List.of() + ); + + SearchResponse searchResponse = new SearchResponse(sections, null, 0, 0, 0, 0, null, null); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); + List executionDetails = transformedResponse.getInternalResponse().getProcessorResult(); + + assertNotNull(executionDetails); + assertEquals(2, executionDetails.size()); + assertEquals("scale_request_size", executionDetails.get(0).getProcessorName()); + assertEquals("fixed_score", executionDetails.get(1).getProcessorName()); + } + + public void testVerbosePipelineWithoutDefinedPipelineThrowsException() { + SearchPipelineService searchPipelineService = createWithProcessors(); + + SearchRequest searchRequest = new SearchRequest(); + searchRequest.source(SearchSourceBuilder.searchSource().verbosePipeline(true)); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver) + ); + assertTrue(e.getMessage(), e.getMessage().contains("The 'verbose pipeline' option requires a search pipeline to be defined.")); + } + + public void testVerbosePipelineWithRequestProcessorOnly() throws Exception { + SearchPipelineService searchPipelineService = createWithProcessors(); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "request_only_pipeline", + new PipelineConfiguration( + "request_only_pipeline", + new BytesArray("{" + "\"request_processors\" : [ { \"scale_request_size\": { \"scale\" : 2 } } ]" + "}"), + MediaTypeRegistry.JSON + ) + ) + ); + + ClusterState initialState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState updatedState = ClusterState.builder(initialState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + + searchPipelineService.applyClusterState(new ClusterChangedEvent("clusterStateUpdated", updatedState, initialState)); + + SearchRequest searchRequest = new SearchRequest().source(SearchSourceBuilder.searchSource().size(10)) + .pipeline("request_only_pipeline"); + searchRequest.source().verbosePipeline(true); + + PipelinedRequest pipelinedRequest = syncTransformRequest( + searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver) + ); + + SearchResponseSections sections = new SearchResponseSections( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0.0f), + null, + null, + false, + null, + null, + 1, + List.of(), + List.of() + ); + + SearchResponse searchResponse = new SearchResponse(sections, null, 0, 0, 0, 0, null, null); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); + List executionDetails = transformedResponse.getInternalResponse().getProcessorResult(); + + assertNotNull(executionDetails); + assertEquals(1, executionDetails.size()); + assertEquals("scale_request_size", executionDetails.get(0).getProcessorName()); + } + + public void testVerbosePipelineWithResponseProcessorOnly() throws Exception { + SearchPipelineService searchPipelineService = createWithProcessors(); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "response_only_pipeline", + new 
PipelineConfiguration( + "response_only_pipeline", + new BytesArray("{" + "\"response_processors\": [ { \"fixed_score\": { \"score\": 5.0 } } ]" + "}"), + MediaTypeRegistry.JSON + ) + ) + ); + + ClusterState initialState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState updatedState = ClusterState.builder(initialState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + + searchPipelineService.applyClusterState(new ClusterChangedEvent("clusterStateUpdated", updatedState, initialState)); + + SearchRequest searchRequest = new SearchRequest().source(SearchSourceBuilder.searchSource().size(10)) + .pipeline("response_only_pipeline"); + searchRequest.source().verbosePipeline(true); + + PipelinedRequest pipelinedRequest = syncTransformRequest( + searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver) + ); + + SearchResponseSections sections = new SearchResponseSections( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0.0f), + null, + null, + false, + null, + null, + 1, + List.of(), + List.of() + ); + + SearchResponse searchResponse = new SearchResponse(sections, null, 0, 0, 0, 0, null, null); + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); + List executionDetails = transformedResponse.getInternalResponse().getProcessorResult(); + + assertNotNull(executionDetails); + assertEquals(1, executionDetails.size()); + assertEquals("fixed_score", executionDetails.get(0).getProcessorName()); + } + + private SearchSourceBuilder createDefaultSearchSourceBuilder() { + return SearchSourceBuilder.searchSource().size(10); + } + + private SearchResponse createDefaultSearchResponse() { + SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0.0f); + + SearchResponseSections sections = new SearchResponseSections(searchHits, null, null, false, null, null, 1, List.of(), List.of()); + + return new SearchResponse(sections, null, 0, 0, 0, 0, null, null); + } + } diff --git a/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapperTests.java b/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapperTests.java new file mode 100644 index 0000000000000..adbbc9efc0686 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchRequestProcessorWrapperTests.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; + +public class TrackingSearchRequestProcessorWrapperTests extends OpenSearchTestCase { + private SearchRequestProcessor mockProcessor; + private TrackingSearchRequestProcessorWrapper wrapper; + private PipelineProcessingContext context; + + @Before + public void setUp() throws Exception { + super.setUp(); + mockProcessor = Mockito.mock(SearchRequestProcessor.class); + wrapper = new TrackingSearchRequestProcessorWrapper(mockProcessor); + context = new PipelineProcessingContext(); + } + + public void testProcessRequestAsyncSuccess() { + SearchRequest inputRequest = new SearchRequest(); + inputRequest.source(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery())); + + SearchRequest outputRequest = new SearchRequest(); + outputRequest.source(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); + + doAnswer(invocation -> { + ActionListener listener = invocation.getArgument(2); + listener.onResponse(outputRequest); + return null; + }).when(mockProcessor).processRequestAsync(any(SearchRequest.class), eq(context), any()); + + ActionListener listener = ActionListener.wrap(response -> { + assertEquals(outputRequest, response); + ProcessorExecutionDetail detail = context.getProcessorExecutionDetails().get(0); + assertEquals(wrapper.getType(), detail.getProcessorName()); + assertEquals(ProcessorExecutionDetail.ProcessorStatus.SUCCESS, detail.getStatus()); + }, e -> fail("Unexpected exception: " + e.getMessage())); + + wrapper.processRequestAsync(inputRequest, context, listener); + } + +} diff --git a/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapperTests.java b/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapperTests.java new file mode 100644 index 0000000000000..4075274948d30 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pipeline/TrackingSearchResponseProcessorWrapperTests.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline; + +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.core.action.ActionListener; +import org.opensearch.search.SearchHits; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import org.mockito.Mockito; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class TrackingSearchResponseProcessorWrapperTests extends OpenSearchTestCase { + private SearchResponseProcessor mockProcessor; + private TrackingSearchResponseProcessorWrapper wrapper; + private PipelineProcessingContext context; + + @Before + public void setUp() throws Exception { + super.setUp(); + mockProcessor = Mockito.mock(SearchResponseProcessor.class); + wrapper = new TrackingSearchResponseProcessorWrapper(mockProcessor); + context = new PipelineProcessingContext(); + } + + public void testConstructorThrowsExceptionWhenProcessorIsNull() { + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> new TrackingSearchResponseProcessorWrapper(null) + ); + + assertEquals("Wrapped processor cannot be null.", exception.getMessage()); + } + + public void testProcessResponseAsync() { + SearchRequest mockRequest = new SearchRequest(); + SearchResponse inputResponse = Mockito.mock(SearchResponse.class); + SearchResponse outputResponse = Mockito.mock(SearchResponse.class); + + when(inputResponse.getHits()).thenReturn(SearchHits.empty()); + when(outputResponse.getHits()).thenReturn(SearchHits.empty()); + + wrapper.processResponseAsync(mockRequest, inputResponse, context, new ActionListener<>() { + @Override + public void onResponse(SearchResponse result) { + assertEquals(outputResponse, result); + assertFalse(context.getProcessorExecutionDetails().isEmpty()); + ProcessorExecutionDetail detail = context.getProcessorExecutionDetails().get(0); + assertEquals(wrapper.getType(), detail.getProcessorName()); + assertNotNull(detail.getInputData()); + assertNotNull(detail.getOutputData()); + assertEquals(ProcessorExecutionDetail.ProcessorStatus.SUCCESS, detail.getStatus()); + } + + @Override + public void onFailure(Exception e) { + fail("Should not trigger failure"); + } + }); + + verify(mockProcessor).processResponseAsync(eq(mockRequest), eq(inputResponse), eq(context), any()); + } +} From c6dfc65ea0bccc9cfe66bc4248d09b42d7430d0e Mon Sep 17 00:00:00 2001 From: kkewwei Date: Fri, 24 Jan 2025 02:35:02 +0800 Subject: [PATCH 03/48] Fix exists queries on nested flat_object fields throw exception (#16803) Signed-off-by: kkewwei Signed-off-by: kkewwei --- CHANGELOG.md | 1 + .../test/index/90_flat_object.yml | 48 +++++++++++++++++-- .../index/mapper/FlatObjectFieldMapper.java | 9 +++- .../mapper/FlatObjectFieldMapperTests.java | 12 +++++ 4 files changed, 65 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 499405ac508e4..5e022a3909731 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -109,6 +109,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) - Fix GRPC 
AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037))
+- Fix exists queries on nested flat_object fields throwing an exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803))

 ### Security

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
index 2a469aa5ff04d..3966b59c4a045 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml
@@ -16,7 +16,10 @@ setup:
               type : "flat_object"
             required_matches:
               type : "long"
-
+            infos:
+              properties:
+                info:
+                  type: "flat_object"
   - do:
       index:
         index: test
@@ -60,7 +63,12 @@ setup:
            "review": [["bad",30.41],["ok",80.0]],
            "publishDate": "2016-01-01"
          },
-          "required_matches": 1
+          "required_matches": 1,
+          "infos": {
+            "info":{
+              "name": "name1"
+            }
+          }
         }
 # Do index refresh
   - do:
@@ -73,6 +81,40 @@ teardown:
   - do:
       indices.delete:
         index: test
+
+---
+"Exists query in root field":
+  - skip:
+      version: "- 2.99.99"
+      reason: "the query would throw an exception prior to 2.99.99"
+
+  - do:
+      search:
+        body: {
+          _source: true,
+          size: 10,
+          query: {
+            exists: {
+              field: "catalog"
+            }
+          }
+        }
+  - length: { hits.hits: 2 }
+
+  - do:
+      search:
+        body: {
+          _source: true,
+          size: 10,
+          query: {
+            exists: {
+              field: "infos"
+            }
+          }
+        }
+  - length: { hits.hits: 1 }
+  - match: { hits.hits.0._source.infos.info.name: "name1" }
+
 ---
 "Invalid docs":
   - skip:
@@ -135,7 +177,7 @@ teardown:
     - match: { test.mappings.properties.catalog.type: flat_object }
     - match: { test.mappings.properties.required_matches.type: long }
 # https://github.com/opensearch-project/OpenSearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#length
-    - length: { test.mappings.properties: 3 }
+    - length: { test.mappings.properties: 4 }
     - length: { test.mappings.properties.catalog: 1 }

 ---
diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
index b921afb3157a8..4425e4e5b0b39 100644
--- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
+++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java
@@ -93,7 +93,12 @@ public static class Defaults {

     @Override
     public MappedFieldType keyedFieldType(String key) {
-        return new FlatObjectFieldType(this.name() + DOT_SYMBOL + key, this.name(), valueFieldType, valueAndPathFieldType);
+        return new FlatObjectFieldType(
+            Strings.isNullOrEmpty(key) ?
this.name() : (this.name() + DOT_SYMBOL + key), + this.name(), + valueFieldType, + valueAndPathFieldType + ); } /** @@ -177,7 +182,7 @@ public FlatObjectFieldType( new TextSearchInfo(Defaults.FIELD_TYPE, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER), Collections.emptyMap() ); - assert rootFieldName == null || (name.length() > rootFieldName.length() && name.startsWith(rootFieldName)); + assert rootFieldName == null || (name.length() >= rootFieldName.length() && name.startsWith(rootFieldName)); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; this.rootFieldName = rootFieldName; diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java index 118f58cf5e855..adb2c8b1ffe2a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldMapperTests.java @@ -17,6 +17,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.opensearch.common.TriFunction; +import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; import org.opensearch.core.xcontent.ToXContent; @@ -27,6 +28,7 @@ import java.io.IOException; import java.util.List; +import java.util.Set; import static org.opensearch.index.mapper.FlatObjectFieldMapper.CONTENT_TYPE; import static org.opensearch.index.mapper.FlatObjectFieldMapper.VALUE_AND_PATH_SUFFIX; @@ -397,7 +399,17 @@ public void testFetchDocValues() throws IOException { Throwable throwable = assertThrows(IllegalArgumentException.class, () -> ft.docValueFormat(null, null)); assertEquals("Field [field] of type [flat_object] does not support doc_value in root field", throwable.getMessage()); } + } + + public void testPatternMatch() throws IOException { + MapperService mapperService = createMapperService( + fieldMapping(b -> b.startObject("properties").startObject("foo").field("type", "flat_object").endObject().endObject()) + ); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + Set fields = queryShardContext.simpleMatchToIndexNames("field.*"); + assertEquals(1, fields.size()); + assertEquals(Sets.newHashSet("field.foo"), fields); } @Override From 94dfe6da7a0e2e436c32ea87d85e7082f954b4f9 Mon Sep 17 00:00:00 2001 From: Junwei Dai <59641585+junweid62@users.noreply.github.com> Date: Thu, 23 Jan 2025 15:23:10 -0800 Subject: [PATCH 04/48] Updates version to fix BWC for SearchSourceBuilder (#17098) * change version to 2.19 Signed-off-by: Junwei Dai * bring back the constructor InternalSearchResponse and SearchResponseSections Signed-off-by: Junwei Dai --------- Signed-off-by: Junwei Dai Co-authored-by: Junwei Dai --- .../action/search/SearchResponseSections.java | 23 +++++++++++++++ .../search/builder/SearchSourceBuilder.java | 4 +-- .../internal/InternalSearchResponse.java | 28 +++++++++++++++++-- 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java index 5eb305d91ee04..f93508b0ba55b 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/opensearch/action/search/SearchResponseSections.java @@ -121,6 +121,29 @@ public SearchResponseSections( 
this.searchExtBuilders.addAll(Objects.requireNonNull(searchExtBuilders, "searchExtBuilders must not be null")); } + public SearchResponseSections( + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileShardResults profileResults, + int numReducePhases, + List searchExtBuilders + ) { + this( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + searchExtBuilders, + Collections.emptyList() + ); + } + public final boolean timedOut() { return this.timedOut; } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index fb21eaff5f857..90dfc1e086602 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -305,7 +305,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_18_0)) { searchPipeline = in.readOptionalString(); } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { verbosePipeline = in.readBoolean(); } } @@ -391,7 +391,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_18_0)) { out.writeOptionalString(searchPipeline); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { out.writeBoolean(verbosePipeline); } } diff --git a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java index c014cd2577662..29c8826f76957 100644 --- a/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java +++ b/server/src/main/java/org/opensearch/search/internal/InternalSearchResponse.java @@ -111,6 +111,30 @@ public InternalSearchResponse( ); } + public InternalSearchResponse( + SearchHits hits, + InternalAggregations aggregations, + Suggest suggest, + SearchProfileShardResults profileResults, + boolean timedOut, + Boolean terminatedEarly, + int numReducePhases, + List searchExtBuilderList + + ) { + super( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + searchExtBuilderList, + Collections.emptyList() + ); + } + public InternalSearchResponse(StreamInput in) throws IOException { super( new SearchHits(in), @@ -149,11 +173,11 @@ private static void writeSearchExtBuildersOnOrAfter(StreamOutput out, List readProcessorResultOnOrAfter(StreamInput in) throws IOException { - return (in.getVersion().onOrAfter(Version.V_3_0_0)) ? in.readList(ProcessorExecutionDetail::new) : Collections.emptyList(); + return (in.getVersion().onOrAfter(Version.V_2_19_0)) ? 
in.readList(ProcessorExecutionDetail::new) : Collections.emptyList(); } private static void writeProcessorResultOnOrAfter(StreamOutput out, List processorResult) throws IOException { - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { out.writeList(processorResult); } } From 13ab4ec048923495d738f09dd11da964e085d42d Mon Sep 17 00:00:00 2001 From: Mingshi Liu Date: Thu, 23 Jan 2025 15:44:38 -0800 Subject: [PATCH 05/48] Introduce Template query (#16818) Introduce template query that holds the content of query which can contain placeholders and can be filled by the variables from PipelineProcessingContext produced by search processors. This allows query rewrite by the search processors. --------- Signed-off-by: Mingshi Liu Co-authored-by: Michael Froh --- CHANGELOG.md | 1 + server/build.gradle | 1 - .../action/search/TransportSearchAction.java | 2 +- .../index/query/BaseQueryRewriteContext.java | 140 +++ .../opensearch/index/query/QueryBuilders.java | 10 + .../index/query/QueryCoordinatorContext.java | 93 ++ .../index/query/QueryRewriteContext.java | 96 +- .../index/query/QueryShardContext.java | 2 +- .../index/query/TemplateQueryBuilder.java | 198 +++++ .../opensearch/indices/IndicesService.java | 3 +- .../org/opensearch/search/SearchModule.java | 3 +- .../org/opensearch/search/SearchService.java | 6 +- .../pipeline/PipelineProcessingContext.java | 4 + .../search/pipeline/PipelinedRequest.java | 4 + .../index/mapper/DateFieldTypeTests.java | 7 +- .../index/query/RewriteableTests.java | 6 +- .../query/TemplateQueryBuilderTests.java | 834 ++++++++++++++++++ .../opensearch/search/SearchModuleTests.java | 3 +- .../AggregatorFactoriesTests.java | 11 +- .../aggregations/bucket/FiltersTests.java | 16 +- .../builder/SearchSourceBuilderTests.java | 4 +- 21 files changed, 1333 insertions(+), 111 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/query/BaseQueryRewriteContext.java create mode 100644 server/src/main/java/org/opensearch/index/query/QueryCoordinatorContext.java create mode 100644 server/src/main/java/org/opensearch/index/query/TemplateQueryBuilder.java create mode 100644 server/src/test/java/org/opensearch/index/query/TemplateQueryBuilderTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 5e022a3909731..507d9906ca5e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added a new `time` field to replace the deprecated `getTime` field in `GetStats`. 
([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009)) - Improve flat_object field parsing performance by reducing two passes to a single pass ([#16297](https://github.com/opensearch-project/OpenSearch/pull/16297)) - Improve performance of the bitmap filtering([#16936](https://github.com/opensearch-project/OpenSearch/pull/16936/)) +- Introduce Template query ([#16818](https://github.com/opensearch-project/OpenSearch/pull/16818)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) diff --git a/server/build.gradle b/server/build.gradle index 6559c7247200a..82eafb07a7ad3 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -70,7 +70,6 @@ dependencies { api project(":libs:opensearch-telemetry") api project(":libs:opensearch-task-commons") - compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index dfec2e1fda738..898174d60de76 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -476,7 +476,7 @@ private void executeRequest( } else { Rewriteable.rewriteAndFetch( sr.source(), - searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis), + searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis, searchRequest), rewriteListener ); } diff --git a/server/src/main/java/org/opensearch/index/query/BaseQueryRewriteContext.java b/server/src/main/java/org/opensearch/index/query/BaseQueryRewriteContext.java new file mode 100644 index 0000000000000..7cfaf9edb4709 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/BaseQueryRewriteContext.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.opensearch.client.Client; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.XContentParser; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.LongSupplier; + +/** + * BaseQueryRewriteContext is a base implementation of the QueryRewriteContext interface. + * It provides core functionality for query rewriting operations in OpenSearch. + * + * This class manages the context for query rewriting, including handling of asynchronous actions, + * access to content registries, and time-related operations. 
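+ *
+ * <p>A minimal construction sketch (the arguments shown are illustrative; any registries,
+ * client, and clock supplier available to the caller would do):
+ * <pre>{@code
+ * BaseQueryRewriteContext rewriteContext = new BaseQueryRewriteContext(
+ *     xContentRegistry,          // NamedXContentRegistry used to parse queries
+ *     namedWriteableRegistry,    // NamedWriteableRegistry for named writeables
+ *     client,                    // Client handed to registered async actions
+ *     System::currentTimeMillis  // shared "now" supplier
+ * );
+ * }</pre>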
+ */
+public class BaseQueryRewriteContext implements QueryRewriteContext {
+    private final NamedXContentRegistry xContentRegistry;
+    private final NamedWriteableRegistry writeableRegistry;
+    protected final Client client;
+    protected final LongSupplier nowInMillis;
+    private final List<BiConsumer<Client, ActionListener<?>>> asyncActions = new ArrayList<>();
+    private final boolean validate;
+
+    public BaseQueryRewriteContext(
+        NamedXContentRegistry xContentRegistry,
+        NamedWriteableRegistry writeableRegistry,
+        Client client,
+        LongSupplier nowInMillis
+    ) {
+        this(xContentRegistry, writeableRegistry, client, nowInMillis, false);
+    }
+
+    public BaseQueryRewriteContext(
+        NamedXContentRegistry xContentRegistry,
+        NamedWriteableRegistry writeableRegistry,
+        Client client,
+        LongSupplier nowInMillis,
+        boolean validate
+    ) {
+
+        this.xContentRegistry = xContentRegistry;
+        this.writeableRegistry = writeableRegistry;
+        this.client = client;
+        this.nowInMillis = nowInMillis;
+        this.validate = validate;
+    }
+
+    /**
+     * The registry used to build new {@link XContentParser}s. Contains registered named parsers needed to parse the query.
+     */
+    public NamedXContentRegistry getXContentRegistry() {
+        return xContentRegistry;
+    }
+
+    /**
+     * Returns the time in milliseconds that is shared across all resources involved. Even across shards and nodes.
+     */
+    public long nowInMillis() {
+        return nowInMillis.getAsLong();
+    }
+
+    public NamedWriteableRegistry getWriteableRegistry() {
+        return writeableRegistry;
+    }
+
+    /**
+     * Returns an instance of {@link QueryShardContext} if available or null otherwise
+     */
+    public QueryShardContext convertToShardContext() {
+        return null;
+    }
+
+    /**
+     * Registers an async action that must be executed before the next rewrite round in order to make progress.
+     * This should be used if a rewriteable needs to fetch some external resources in order to be executed, i.e. a document
+     * from an index.
+     */
+    public void registerAsyncAction(BiConsumer<Client, ActionListener<?>> asyncAction) {
+        asyncActions.add(asyncAction);
+    }
+
+    /**
+     * Returns true if there are any registered async actions.
+     */
+    public boolean hasAsyncActions() {
+        return asyncActions.isEmpty() == false;
+    }
+
+    /**
+     * Executes all registered async actions and notifies the listener once it's done. The value that is passed to the listener is always
+     * null. The list of registered actions is cleared once this method returns.
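+     *
+     * <p>A rough usage sketch, assuming a rewriteable registered a fetch step earlier
+     * ({@code fetchResource} and {@code cached} are hypothetical placeholders, not real API):
+     * <pre>{@code
+     * context.registerAsyncAction(
+     *     (client, actionListener) -> fetchResource(client, ActionListener.wrap(resource -> {
+     *         cached.set(resource);              // stash the result for the next rewrite round
+     *         actionListener.onResponse(null);   // the response value is ignored
+     *     }, actionListener::onFailure))
+     * );
+     * // the framework then drives all registered actions before rewriting again:
+     * context.executeAsyncActions(ActionListener.wrap(ignored -> {}, e -> {}));
+     * }</pre>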
+     */
+    public void executeAsyncActions(ActionListener listener) {
+        if (asyncActions.isEmpty()) {
+            listener.onResponse(null);
+            return;
+        }
+
+        CountDown countDown = new CountDown(asyncActions.size());
+        ActionListener internalListener = new ActionListener() {
+            @Override
+            public void onResponse(Object o) {
+                if (countDown.countDown()) {
+                    listener.onResponse(null);
+                }
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                if (countDown.fastForward()) {
+                    listener.onFailure(e);
+                }
+            }
+        };
+        // make a copy to prevent concurrent modification exception
+        List<BiConsumer<Client, ActionListener<?>>> biConsumers = new ArrayList<>(asyncActions);
+        asyncActions.clear();
+        for (BiConsumer<Client, ActionListener<?>> action : biConsumers) {
+            action.accept(client, internalListener);
+        }
+    }
+
+    public boolean validate() {
+        return validate;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java
index 387d21830aa38..1debba73136b2 100644
--- a/server/src/main/java/org/opensearch/index/query/QueryBuilders.java
+++ b/server/src/main/java/org/opensearch/index/query/QueryBuilders.java
@@ -50,6 +50,7 @@
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;

 /**
  * Utility class to create search queries.
@@ -780,4 +781,13 @@ public static GeoShapeQueryBuilder geoDisjointQuery(String name, String indexedS
     public static ExistsQueryBuilder existsQuery(String name) {
         return new ExistsQueryBuilder(name);
     }
+
+    /**
+     * A query that contains a template with placeholders to be resolved by search processors
+     *
+     * @param content The content of the template
+     */
+    public static TemplateQueryBuilder templateQuery(Map<String, Object> content) {
+        return new TemplateQueryBuilder(content);
+    }
 }
diff --git a/server/src/main/java/org/opensearch/index/query/QueryCoordinatorContext.java b/server/src/main/java/org/opensearch/index/query/QueryCoordinatorContext.java
new file mode 100644
index 0000000000000..c99a952ee42e3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/query/QueryCoordinatorContext.java
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.query;
+
+import org.opensearch.client.Client;
+import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.core.action.ActionListener;
+import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.core.xcontent.NamedXContentRegistry;
+import org.opensearch.search.pipeline.PipelinedRequest;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.BiConsumer;
+
+/**
+ * The QueryCoordinatorContext class implements the QueryRewriteContext interface and provides
+ * additional functionality for coordinating query rewriting in OpenSearch.
+ *
+ * This class acts as a wrapper around a QueryRewriteContext instance and a PipelinedRequest,
+ * allowing access to rewrite context methods while passing along search request information.
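+ *
+ * <p>Illustrative flow, assuming a search processor has stored an attribute on the request's
+ * processing context (the attribute key {@code "user_id"} below is made up for the example):
+ * <pre>{@code
+ * QueryCoordinatorContext coordinatorContext =
+ *     new QueryCoordinatorContext(baseRewriteContext, pipelinedRequest);
+ * Map<String, Object> variables = coordinatorContext.getContextVariables();
+ * Object userId = variables.get("user_id");
+ * }</pre>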
+ *
+ * @since 2.19.0
+ */
+@PublicApi(since = "2.19.0")
+public class QueryCoordinatorContext implements QueryRewriteContext {
+    private final QueryRewriteContext rewriteContext;
+    private final PipelinedRequest searchRequest;
+
+    public QueryCoordinatorContext(QueryRewriteContext rewriteContext, PipelinedRequest searchRequest) {
+        this.rewriteContext = rewriteContext;
+        this.searchRequest = searchRequest;
+    }
+
+    @Override
+    public NamedXContentRegistry getXContentRegistry() {
+        return rewriteContext.getXContentRegistry();
+    }
+
+    @Override
+    public long nowInMillis() {
+        return rewriteContext.nowInMillis();
+    }
+
+    @Override
+    public NamedWriteableRegistry getWriteableRegistry() {
+        return rewriteContext.getWriteableRegistry();
+    }
+
+    @Override
+    public QueryShardContext convertToShardContext() {
+        return rewriteContext.convertToShardContext();
+    }
+
+    @Override
+    public void registerAsyncAction(BiConsumer<Client, ActionListener<?>> asyncAction) {
+        rewriteContext.registerAsyncAction(asyncAction);
+    }
+
+    @Override
+    public boolean hasAsyncActions() {
+        return rewriteContext.hasAsyncActions();
+    }
+
+    @Override
+    public void executeAsyncActions(ActionListener listener) {
+        rewriteContext.executeAsyncActions(listener);
+    }
+
+    @Override
+    public boolean validate() {
+        return rewriteContext.validate();
+    }
+
+    @Override
+    public QueryCoordinatorContext convertToCoordinatorContext() {
+        return this;
+    }
+
+    public Map<String, Object> getContextVariables() {
+
+        // Read from pipeline context
+        Map<String, Object> contextVariables = new HashMap<>(searchRequest.getPipelineProcessingContext().getAttributes());
+
+        return contextVariables;
+    }
+}
diff --git a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java
index 15a6d0b5a774e..aec5914066ab5 100644
--- a/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java
+++ b/server/src/main/java/org/opensearch/index/query/QueryRewriteContext.java
@@ -33,16 +33,12 @@

 import org.opensearch.client.Client;
 import org.opensearch.common.annotation.PublicApi;
-import org.opensearch.common.util.concurrent.CountDown;
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.core.xcontent.XContentParser;

-import java.util.ArrayList;
-import java.util.List;
 import java.util.function.BiConsumer;
-import java.util.function.LongSupplier;

 /**
  * Context object used to rewrite {@link QueryBuilder} instances into simplified version.
@@ -50,60 +46,27 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class QueryRewriteContext { - private final NamedXContentRegistry xContentRegistry; - private final NamedWriteableRegistry writeableRegistry; - protected final Client client; - protected final LongSupplier nowInMillis; - private final List>> asyncActions = new ArrayList<>(); - private final boolean validate; - - public QueryRewriteContext( - NamedXContentRegistry xContentRegistry, - NamedWriteableRegistry writeableRegistry, - Client client, - LongSupplier nowInMillis - ) { - this(xContentRegistry, writeableRegistry, client, nowInMillis, false); - } - - public QueryRewriteContext( - NamedXContentRegistry xContentRegistry, - NamedWriteableRegistry writeableRegistry, - Client client, - LongSupplier nowInMillis, - boolean validate - ) { - - this.xContentRegistry = xContentRegistry; - this.writeableRegistry = writeableRegistry; - this.client = client; - this.nowInMillis = nowInMillis; - this.validate = validate; - } - +public interface QueryRewriteContext { /** * The registry used to build new {@link XContentParser}s. Contains registered named parsers needed to parse the query. */ - public NamedXContentRegistry getXContentRegistry() { - return xContentRegistry; - } + NamedXContentRegistry getXContentRegistry(); /** * Returns the time in milliseconds that is shared across all resources involved. Even across shards and nodes. */ - public long nowInMillis() { - return nowInMillis.getAsLong(); - } + long nowInMillis(); - public NamedWriteableRegistry getWriteableRegistry() { - return writeableRegistry; - } + NamedWriteableRegistry getWriteableRegistry(); /** * Returns an instance of {@link QueryShardContext} if available of null otherwise */ - public QueryShardContext convertToShardContext() { + default QueryShardContext convertToShardContext() { + return null; + } + + default QueryCoordinatorContext convertToCoordinatorContext() { return null; } @@ -112,51 +75,18 @@ public QueryShardContext convertToShardContext() { * This should be used if a rewriteabel needs to fetch some external resources in order to be executed ie. a document * from an index. */ - public void registerAsyncAction(BiConsumer> asyncAction) { - asyncActions.add(asyncAction); - } + void registerAsyncAction(BiConsumer> asyncAction); /** * Returns true if there are any registered async actions. */ - public boolean hasAsyncActions() { - return asyncActions.isEmpty() == false; - } + boolean hasAsyncActions(); /** * Executes all registered async actions and notifies the listener once it's done. The value that is passed to the listener is always * null. The list of registered actions is cleared once this method returns. 
*/ - public void executeAsyncActions(ActionListener listener) { - if (asyncActions.isEmpty()) { - listener.onResponse(null); - } else { - CountDown countDown = new CountDown(asyncActions.size()); - ActionListener internalListener = new ActionListener() { - @Override - public void onResponse(Object o) { - if (countDown.countDown()) { - listener.onResponse(null); - } - } - - @Override - public void onFailure(Exception e) { - if (countDown.fastForward()) { - listener.onFailure(e); - } - } - }; - // make a copy to prevent concurrent modification exception - List>> biConsumers = new ArrayList<>(asyncActions); - asyncActions.clear(); - for (BiConsumer> action : biConsumers) { - action.accept(client, internalListener); - } - } - } + void executeAsyncActions(ActionListener listener); - public boolean validate() { - return validate; - } + boolean validate(); } diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index d717f10b17d9c..0610752e532e7 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -100,7 +100,7 @@ * @opensearch.api */ @PublicApi(since = "1.0.0") -public class QueryShardContext extends QueryRewriteContext { +public class QueryShardContext extends BaseQueryRewriteContext { private final ScriptService scriptService; private final IndexSettings indexSettings; diff --git a/server/src/main/java/org/opensearch/index/query/TemplateQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/TemplateQueryBuilder.java new file mode 100644 index 0000000000000..85d119ab704ec --- /dev/null +++ b/server/src/main/java/org/opensearch/index/query/TemplateQueryBuilder.java @@ -0,0 +1,198 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.query; + +import org.apache.lucene.search.Query; +import org.opensearch.common.xcontent.LoggingDeprecationHandler; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * A query builder that constructs a query based on a template and context variables. + * This query is designed to be rewritten with variables from search processors. + */ + +public class TemplateQueryBuilder extends AbstractQueryBuilder { + public static final String NAME = "template"; + public static final String queryName = "template"; + private final Map content; + + /** + * Constructs a new TemplateQueryBuilder with the given content. + * + * @param content The template content as a map. + */ + public TemplateQueryBuilder(Map content) { + this.content = content; + } + + /** + * Creates a TemplateQueryBuilder from XContent. + * + * @param parser The XContentParser to read from. + * @return A new TemplateQueryBuilder instance. 
+ * @throws IOException If there's an error parsing the content. + */ + public static TemplateQueryBuilder fromXContent(XContentParser parser) throws IOException { + return new TemplateQueryBuilder(parser.map()); + } + + /** + * Constructs a TemplateQueryBuilder from a stream input. + * + * @param in The StreamInput to read from. + * @throws IOException If there's an error reading from the stream. + */ + public TemplateQueryBuilder(StreamInput in) throws IOException { + super(in); + this.content = in.readMap(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeMap(content); + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(NAME, content); + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + throw new IllegalStateException( + "Template queries cannot be converted directly to a query. Template Query must be rewritten first during doRewrite." + ); + } + + @Override + protected boolean doEquals(TemplateQueryBuilder other) { + return Objects.equals(this.content, other.content); + } + + @Override + protected int doHashCode() { + return Objects.hash(content); + } + + @Override + public String getWriteableName() { + return NAME; + } + + /** + * Gets the content of this template query. + * + * @return The template content as a map. + */ + public Map getContent() { + return content; + } + + /** + * Rewrites the template query by substituting variables from the context. + * + * @param queryCoordinatorContext The context for query rewriting. + * @return A rewritten QueryBuilder. + * @throws IOException If there's an error during rewriting. + */ + @Override + protected QueryBuilder doRewrite(QueryRewriteContext queryCoordinatorContext) throws IOException { + // the queryRewrite is expected at QueryCoordinator level + if (!(queryCoordinatorContext instanceof QueryCoordinatorContext)) { + throw new IllegalStateException( + "Template Query must be rewritten at the coordinator node. Rewriting at shard level is not supported." 
+            );
+        }
+
+        QueryCoordinatorContext convertedQueryCoordinateContext = (QueryCoordinatorContext) queryCoordinatorContext;
+        Map<String, Object> contextVariables = convertedQueryCoordinateContext.getContextVariables();
+        String queryString;
+
+        try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
+            builder.map(this.content);
+            queryString = builder.toString();
+        }
+
+        // Convert Map<String, Object> to Map<String, String> with proper JSON escaping
+        Map<String, String> variablesMap = null;
+        if (contextVariables != null) {
+            variablesMap = contextVariables.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> {
+                try {
+                    return JsonXContent.contentBuilder().value(entry.getValue()).toString();
+                } catch (IOException e) {
+                    throw new RuntimeException("Error converting contextVariables to JSON string", e);
+                }
+            }));
+        }
+        String newQueryContent = replaceVariables(queryString, variablesMap);
+
+        try {
+            XContentParser parser = XContentType.JSON.xContent()
+                .createParser(queryCoordinatorContext.getXContentRegistry(), LoggingDeprecationHandler.INSTANCE, newQueryContent);
+
+            ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
+
+            QueryBuilder newQueryBuilder = parseInnerQueryBuilder(parser);
+
+            return newQueryBuilder;
+
+        } catch (Exception e) {
+            throw new IllegalArgumentException("Failed to rewrite template query: " + newQueryContent, e);
+        }
+    }
+
+    private String replaceVariables(String template, Map<String, String> variables) {
+        if (template == null || template.equals("null")) {
+            throw new IllegalArgumentException("Template string cannot be null. A valid template must be provided.");
+        }
+        if (template.isEmpty() || template.equals("{}")) {
+            throw new IllegalArgumentException("Template string cannot be empty. A valid template must be provided.");
+        }
+        if (variables == null || variables.isEmpty()) {
+            return template;
+        }
+
+        StringBuilder result = new StringBuilder();
+        int start = 0;
+        while (true) {
+            int startVar = template.indexOf("\"${", start);
+            if (startVar == -1) {
+                result.append(template.substring(start));
+                break;
+            }
+            result.append(template, start, startVar);
+            int endVar = template.indexOf("}\"", startVar);
+            if (endVar == -1) {
+                throw new IllegalArgumentException("Unclosed variable in template: " + template.substring(startVar));
+            }
+            String varName = template.substring(startVar + 3, endVar);
+            String replacement = variables.get(varName);
+            if (replacement == null) {
+                throw new IllegalArgumentException("Variable not found: " + varName);
+            }
+            result.append(replacement);
+            start = endVar + 2;
+        }
+        return result.toString();
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index b9bad5527e3f4..67fab720d95dd 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -122,6 +122,7 @@
 import org.opensearch.index.mapper.IdFieldMapper;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.merge.MergeStats;
+import org.opensearch.index.query.BaseQueryRewriteContext;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.QueryRewriteContext;
 import org.opensearch.index.recovery.RecoveryStats;
@@ -1937,7 +1938,7 @@ public QueryRewriteContext getValidationRewriteContext(LongSupplier nowInMillis)
      * Returns a new {@link QueryRewriteContext} with the given {@code now} provider
      */
     private QueryRewriteContext getRewriteContext(LongSupplier nowInMillis, boolean validate) {
-        return new QueryRewriteContext(xContentRegistry, namedWriteableRegistry, client, nowInMillis, validate);
+        return new BaseQueryRewriteContext(xContentRegistry, namedWriteableRegistry, client, nowInMillis, validate);
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/search/SearchModule.java b/server/src/main/java/org/opensearch/search/SearchModule.java
index 40e0293f88f07..24bdb2d28cba2 100644
--- a/server/src/main/java/org/opensearch/search/SearchModule.java
+++ b/server/src/main/java/org/opensearch/search/SearchModule.java
@@ -86,6 +86,7 @@
 import org.opensearch.index.query.SpanOrQueryBuilder;
 import org.opensearch.index.query.SpanTermQueryBuilder;
 import org.opensearch.index.query.SpanWithinQueryBuilder;
+import org.opensearch.index.query.TemplateQueryBuilder;
 import org.opensearch.index.query.TermQueryBuilder;
 import org.opensearch.index.query.TermsQueryBuilder;
 import org.opensearch.index.query.TermsSetQueryBuilder;
@@ -1172,7 +1173,7 @@ private void registerQueryParsers(List<SearchPlugin> plugins) {
         registerQuery(
             new QuerySpec<>(MatchBoolPrefixQueryBuilder.NAME, MatchBoolPrefixQueryBuilder::new, MatchBoolPrefixQueryBuilder::fromXContent)
         );
-
+        registerQuery(new QuerySpec<>(TemplateQueryBuilder.NAME, TemplateQueryBuilder::new, TemplateQueryBuilder::fromXContent));
         if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
             registerQuery(new QuerySpec<>(GeoShapeQueryBuilder.NAME, GeoShapeQueryBuilder::new, GeoShapeQueryBuilder::fromXContent));
         }
diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java
index e892a2f1a7620..d4380eb09e360 100644
--- a/server/src/main/java/org/opensearch/search/SearchService.java
+++ b/server/src/main/java/org/opensearch/search/SearchService.java
@@ -85,6 +85,7 @@
 import org.opensearch.index.query.MatchAllQueryBuilder;
 import org.opensearch.index.query.MatchNoneQueryBuilder;
 import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.index.query.QueryCoordinatorContext;
 import org.opensearch.index.query.QueryRewriteContext;
 import org.opensearch.index.query.QueryShardContext;
 import org.opensearch.index.query.Rewriteable;
@@ -126,6 +127,7 @@
 import org.opensearch.search.internal.ShardSearchContextId;
 import org.opensearch.search.internal.ShardSearchRequest;
 import org.opensearch.search.lookup.SearchLookup;
+import org.opensearch.search.pipeline.PipelinedRequest;
 import org.opensearch.search.profile.Profilers;
 import org.opensearch.search.query.QueryPhase;
 import org.opensearch.search.query.QuerySearchRequest;
@@ -1775,8 +1777,8 @@ private void rewriteAndFetchShardRequest(IndexShard shard, ShardSearchRequest request,
     /**
      * Returns a new {@link QueryRewriteContext} with the given {@code now} provider
      */
-    public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis) {
-        return indicesService.getRewriteContext(nowInMillis);
+    public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis, PipelinedRequest searchRequest) {
+        return new QueryCoordinatorContext(indicesService.getRewriteContext(nowInMillis), searchRequest);
     }
 
     /**
diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java
index 7e86c30ddbbd9..c7fad1363cf2f 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineProcessingContext.java
@@ -57,4 +57,8 @@ public void addProcessorExecutionDetail(ProcessorExecutionDetail detail) {
     public List<ProcessorExecutionDetail> getProcessorExecutionDetails() {
         return Collections.unmodifiableList(processorExecutionDetails);
     }
+
+    public Map<String, Object> getAttributes() {
+        return attributes;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
index f5ce94946dd32..b35784aef5582 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelinedRequest.java
@@ -61,4 +61,8 @@ public void transformSearchPhaseResults(
     Pipeline getPipeline() {
         return pipeline;
     }
+
+    public PipelineProcessingContext getPipelineProcessingContext() {
+        return requestContext;
+    }
 }
diff --git a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
index febdb17201edd..e3f2f6e5ea27c 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DateFieldTypeTests.java
@@ -75,6 +75,7 @@
 import org.opensearch.index.mapper.DateFieldMapper.Resolution;
 import org.opensearch.index.mapper.MappedFieldType.Relation;
 import org.opensearch.index.mapper.ParseContext.Document;
+import org.opensearch.index.query.BaseQueryRewriteContext;
 import org.opensearch.index.query.DateRangeIncludingNowQuery;
 import org.opensearch.index.query.QueryRewriteContext;
 import org.opensearch.index.query.QueryShardContext;
@@ -99,7 +100,7 @@ public class DateFieldTypeTests extends FieldTypeTestCase {
     private static final long nowInMillis = 0;
 
     public void testIsFieldWithinRangeEmptyReader() throws IOException {
-        QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
+        QueryRewriteContext context = new BaseQueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
         IndexReader reader = new MultiReader();
         DateFieldType ft = new DateFieldType("my_date");
         assertEquals(
@@ -136,7 +137,7 @@ public void isFieldWithinRangeTestCase(DateFieldType ft) throws IOException {
         doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null);
         doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, alternateFormat);
 
-        QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
+        QueryRewriteContext context = new BaseQueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
 
         // Fields with no value indexed.
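        // As in production wiring above, QueryRewriteContext is now an interface:
        // BaseQueryRewriteContext is the default shard-level implementation, and
        // QueryCoordinatorContext decorates it on the coordinator node so that search
        // pipeline processors can publish context variables. A minimal sketch, assuming
        // a PipelinedRequest named pipelinedRequest is in scope:
        //
        //   QueryRewriteContext base = new BaseQueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
        //   QueryRewriteContext coordinator = new QueryCoordinatorContext(base, pipelinedRequest);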
DateFieldType ft2 = new DateFieldType("my_date2"); @@ -148,7 +149,7 @@ public void isFieldWithinRangeTestCase(DateFieldType ft) throws IOException { private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, DateTimeZone zone, DateMathParser alternateFormat) throws IOException { - QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis); + QueryRewriteContext context = new BaseQueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis); assertEquals( Relation.INTERSECTS, ft.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", randomBoolean(), randomBoolean(), null, null, context) diff --git a/server/src/test/java/org/opensearch/index/query/RewriteableTests.java b/server/src/test/java/org/opensearch/index/query/RewriteableTests.java index 6385a57f9f370..6e58023ecc7e2 100644 --- a/server/src/test/java/org/opensearch/index/query/RewriteableTests.java +++ b/server/src/test/java/org/opensearch/index/query/RewriteableTests.java @@ -45,7 +45,7 @@ public class RewriteableTests extends OpenSearchTestCase { public void testRewrite() throws IOException { - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null); + QueryRewriteContext context = new BaseQueryRewriteContext(null, null, null, null); TestRewriteable rewrite = Rewriteable.rewrite( new TestRewriteable(randomIntBetween(0, Rewriteable.MAX_REWRITE_ROUNDS)), context, @@ -65,7 +65,7 @@ public void testRewrite() throws IOException { } public void testRewriteAndFetch() throws ExecutionException, InterruptedException { - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null); + BaseQueryRewriteContext context = new BaseQueryRewriteContext(null, null, null, null); PlainActionFuture future = new PlainActionFuture<>(); Rewriteable.rewriteAndFetch(new TestRewriteable(randomIntBetween(0, Rewriteable.MAX_REWRITE_ROUNDS), true), context, future); TestRewriteable rewrite = future.get(); @@ -83,7 +83,7 @@ public void testRewriteAndFetch() throws ExecutionException, InterruptedExceptio } public void testRewriteList() throws IOException { - QueryRewriteContext context = new QueryRewriteContext(null, null, null, null); + BaseQueryRewriteContext context = new BaseQueryRewriteContext(null, null, null, null); List rewriteableList = new ArrayList<>(); int numInstances = randomIntBetween(1, 10); rewriteableList.add(new TestRewriteable(randomIntBetween(1, Rewriteable.MAX_REWRITE_ROUNDS))); diff --git a/server/src/test/java/org/opensearch/index/query/TemplateQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/TemplateQueryBuilderTests.java new file mode 100644 index 0000000000000..4ea01818ca32e --- /dev/null +++ b/server/src/test/java/org/opensearch/index/query/TemplateQueryBuilderTests.java @@ -0,0 +1,834 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.query; + +import org.opensearch.client.Client; +import org.opensearch.common.geo.GeoPoint; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.search.SearchModule; +import org.opensearch.search.builder.SearchSourceBuilder; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiConsumer; + +import static org.opensearch.index.query.TemplateQueryBuilder.NAME; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TemplateQueryBuilderTests extends OpenSearchTestCase { + + /** + * Tests the fromXContent method of TemplateQueryBuilder. + * Verifies that a TemplateQueryBuilder can be correctly created from XContent. + */ + public void testFromXContent() throws IOException { + /* + { + "template": { + "term": { + "message": { + "value": "foo" + } + } + } + } + */ + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "foo"); + term.put("message", message); + template.put("term", term); + + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(template); + + XContentParser contentParser = createParser(xContentBuilder); + contentParser.nextToken(); + TemplateQueryBuilder templateQueryBuilder = TemplateQueryBuilder.fromXContent(contentParser); + + assertEquals(NAME, templateQueryBuilder.getWriteableName()); + assertEquals(template, templateQueryBuilder.getContent()); + + SearchSourceBuilder source = new SearchSourceBuilder().query(templateQueryBuilder); + assertEquals(source.toString(), "{\"query\":{\"template\":{\"term\":{\"message\":{\"value\":\"foo\"}}}}}"); + } + + /** + * Tests the query source generation of TemplateQueryBuilder. + * Verifies that the correct query source is generated from a TemplateQueryBuilder. + */ + public void testQuerySource() { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "foo"); + term.put("message", message); + template.put("term", term); + QueryBuilder incomingQuery = new TemplateQueryBuilder(template); + SearchSourceBuilder source = new SearchSourceBuilder().query(incomingQuery); + assertEquals(source.toString(), "{\"query\":{\"template\":{\"term\":{\"message\":{\"value\":\"foo\"}}}}}"); + } + + /** + * Tests parsing a TemplateQueryBuilder from a JSON string. + * Verifies that the parsed query matches the expected structure and can be serialized and deserialized. 
+ */ + public void testFromJson() throws IOException { + String jsonString = "{\n" + + " \"geo_shape\": {\n" + + " \"location\": {\n" + + " \"shape\": {\n" + + " \"type\": \"Envelope\",\n" + + " \"coordinates\": \"${modelPredictionOutcome}\"\n" + + " },\n" + + " \"relation\": \"intersects\"\n" + + " },\n" + + " \"ignore_unmapped\": false,\n" + + " \"boost\": 42.0\n" + + " }\n" + + "}"; + + XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, jsonString); + parser.nextToken(); + TemplateQueryBuilder parsed = TemplateQueryBuilder.fromXContent(parser); + + // Check if the parsed query is an instance of TemplateQueryBuilder + assertNotNull(parsed); + assertTrue(parsed instanceof TemplateQueryBuilder); + + // Check if the content of the parsed query matches the expected content + Map expectedContent = new HashMap<>(); + Map geoShape = new HashMap<>(); + Map location = new HashMap<>(); + Map shape = new HashMap<>(); + + shape.put("type", "Envelope"); + shape.put("coordinates", "${modelPredictionOutcome}"); + location.put("shape", shape); + location.put("relation", "intersects"); + geoShape.put("location", location); + geoShape.put("ignore_unmapped", false); + geoShape.put("boost", 42.0); + expectedContent.put("geo_shape", geoShape); + + Map actualContent = new HashMap<>(); + actualContent.put("template", expectedContent); + assertEquals(expectedContent, parsed.getContent()); + + // Test that the query can be serialized and deserialized + BytesStreamOutput out = new BytesStreamOutput(); + parsed.writeTo(out); + StreamInput in = out.bytes().streamInput(); + TemplateQueryBuilder deserializedQuery = new TemplateQueryBuilder(in); + assertEquals(parsed.getContent(), deserializedQuery.getContent()); + + // Test that the query can be converted to XContent + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + parsed.doXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + Map expectedJson = new HashMap<>(); + Map template = new HashMap<>(); + template.put("geo_shape", geoShape); + expectedJson.put("template", template); + + XContentParser jsonParser = XContentType.JSON.xContent() + .createParser(xContentRegistry(), DeprecationHandler.THROW_UNSUPPORTED_OPERATION, builder.toString()); + Map actualJson = jsonParser.map(); + + assertEquals(expectedJson, actualJson); + } + + /** + * Tests the constructor and getter methods of TemplateQueryBuilder. + * Verifies that the content and writeable name are correctly set and retrieved. + */ + public void testConstructorAndGetters() { + Map content = new HashMap<>(); + content.put("key", "value"); + TemplateQueryBuilder builder = new TemplateQueryBuilder(content); + + assertEquals(content, builder.getContent()); + assertEquals(NAME, builder.getWriteableName()); + } + + /** + * Tests the equals and hashCode methods of TemplateQueryBuilder. + * Verifies that two builders with the same content are equal and have the same hash code, + * while builders with different content are not equal and have different hash codes. 
+ */ + public void testEqualsAndHashCode() { + Map content1 = new HashMap<>(); + content1.put("key", "value"); + TemplateQueryBuilder builder1 = new TemplateQueryBuilder(content1); + + Map content2 = new HashMap<>(); + content2.put("key", "value"); + TemplateQueryBuilder builder2 = new TemplateQueryBuilder(content2); + + Map content3 = new HashMap<>(); + content3.put("key", "different_value"); + TemplateQueryBuilder builder3 = new TemplateQueryBuilder(content3); + + assertTrue(builder1.equals(builder2)); + assertTrue(builder1.hashCode() == builder2.hashCode()); + assertFalse(builder1.equals(builder3)); + assertFalse(builder1.hashCode() == builder3.hashCode()); + } + + /** + * Tests the doToQuery method of TemplateQueryBuilder. + * Verifies that calling doToQuery throws an IllegalStateException. + */ + public void testDoToQuery() { + Map content = new HashMap<>(); + content.put("key", "value"); + TemplateQueryBuilder builder = new TemplateQueryBuilder(content); + + QueryShardContext mockContext = mock(QueryShardContext.class); + expectThrows(IllegalStateException.class, () -> builder.doToQuery(mockContext)); + } + + /** + * Tests the serialization and deserialization of TemplateQueryBuilder. + * Verifies that a builder can be written to a stream and read back correctly. + */ + public void testStreamRoundTrip() throws IOException { + Map content = new HashMap<>(); + content.put("key", "value"); + TemplateQueryBuilder original = new TemplateQueryBuilder(content); + + BytesStreamOutput out = new BytesStreamOutput(); + original.writeTo(out); + + StreamInput in = out.bytes().streamInput(); + TemplateQueryBuilder deserialized = new TemplateQueryBuilder(in); + + assertEquals(original, deserialized); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a simple term query. + * Verifies that the template is correctly rewritten to a TermQueryBuilder. + */ + public void testDoRewrite() throws IOException { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "foo"); + term.put("message", message); + template.put("term", term); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("message", "foo"); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + TermQueryBuilder newQuery = (TermQueryBuilder) templateQueryBuilder.doRewrite(queryRewriteContext); + + assertEquals(newQuery, termQueryBuilder); + assertEquals( + "{\n" + + " \"term\" : {\n" + + " \"message\" : {\n" + + " \"value\" : \"foo\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a string variable. + * Verifies that the template is correctly rewritten with the variable substituted. 
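     * A compact before/after sketch of the rewrite this verifies, with the request
     * context holding response=foo:
     *   template:  {"term": {"message": {"value": "${response}"}}}
     *   rewritten: {"term": {"message": {"value": "foo"}}}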
+ */ + public void testDoRewriteWithString() throws IOException { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "${response}"); + term.put("message", message); + template.put("term", term); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("message", "foo"); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + contextVariables.put("response", "foo"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + TermQueryBuilder newQuery = (TermQueryBuilder) templateQueryBuilder.doRewrite(queryRewriteContext); + + assertEquals(newQuery, termQueryBuilder); + assertEquals( + "{\n" + + " \"term\" : {\n" + + " \"message\" : {\n" + + " \"value\" : \"foo\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a list variable. + * Verifies that the template is correctly rewritten with the list variable substituted. + */ + public void testDoRewriteWithList() throws IOException { + ArrayList termsList = new ArrayList<>(); + termsList.add("foo"); + termsList.add("bar"); + + Map template = new HashMap<>(); + Map terms = new HashMap<>(); + + terms.put("message", "${response}"); + template.put("terms", terms); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + TermsQueryBuilder termsQueryBuilder = new TermsQueryBuilder("message", termsList); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + contextVariables.put("response", termsList); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + NamedXContentRegistry TEST_XCONTENT_REGISTRY_FOR_QUERY = new NamedXContentRegistry( + new SearchModule(Settings.EMPTY, List.of()).getNamedXContents() + ); + when(queryRewriteContext.getXContentRegistry()).thenReturn(TEST_XCONTENT_REGISTRY_FOR_QUERY); + TermsQueryBuilder newQuery = (TermsQueryBuilder) templateQueryBuilder.doRewrite(queryRewriteContext); + assertEquals(newQuery, termsQueryBuilder); + assertEquals( + "{\n" + + " \"terms\" : {\n" + + " \"message\" : [\n" + + " \"foo\",\n" + + " \"bar\"\n" + + " ],\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a geo_distance query. + * Verifies that the template is correctly rewritten for a geo_distance query. 
+ */ + public void testDoRewriteWithGeoDistanceQuery() throws IOException { + Map template = new HashMap<>(); + Map geoDistance = new HashMap<>(); + + geoDistance.put("distance", "12km"); + geoDistance.put("pin.location", "${geoPoint}"); + template.put("geo_distance", geoDistance); + + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + GeoPoint geoPoint = new GeoPoint(40, -70); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + Map contextVariables = new HashMap<>(); + contextVariables.put("geoPoint", geoPoint); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + GeoDistanceQueryBuilder expectedQuery = new GeoDistanceQueryBuilder("pin.location"); + expectedQuery.point(geoPoint).distance("12km"); + + QueryBuilder newQuery = templateQueryBuilder.doRewrite(queryRewriteContext); + assertEquals(expectedQuery, newQuery); + assertEquals( + "{\n" + + " \"geo_distance\" : {\n" + + " \"pin.location\" : [\n" + + " -70.0,\n" + + " 40.0\n" + + " ],\n" + + " \"distance\" : 12000.0,\n" + + " \"distance_type\" : \"arc\",\n" + + " \"validation_method\" : \"STRICT\",\n" + + " \"ignore_unmapped\" : false,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a range query. + * Verifies that the template is correctly rewritten for a range query. + */ + public void testDoRewriteWithRangeQuery() throws IOException { + Map template = new HashMap<>(); + Map range = new HashMap<>(); + Map age = new HashMap<>(); + + age.put("gte", "${minAge}"); + age.put("lte", "${maxAge}"); + range.put("age", age); + template.put("range", range); + + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + Map contextVariables = new HashMap<>(); + contextVariables.put("minAge", 25); + contextVariables.put("maxAge", 35); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + RangeQueryBuilder expectedQuery = new RangeQueryBuilder("age"); + expectedQuery.gte(25).lte(35); + + QueryBuilder newQuery = templateQueryBuilder.doRewrite(queryRewriteContext); + assertEquals(expectedQuery, newQuery); + assertEquals( + "{\n" + + " \"range\" : {\n" + + " \"age\" : {\n" + + " \"from\" : 25,\n" + + " \"to\" : 35,\n" + + " \"include_lower\" : true,\n" + + " \"include_upper\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a nested map variable. + * Verifies that the template is correctly rewritten with the nested map variable substituted. 
+ */ + public void testDoRewriteWithNestedMap() throws IOException { + Map template = new HashMap<>(); + Map bool = new HashMap<>(); + List> must = new ArrayList<>(); + Map match = new HashMap<>(); + Map textEntry = new HashMap<>(); + + textEntry.put("text_entry", "${keyword}"); + match.put("match", textEntry); + must.add(match); + bool.put("must", must); + + List> should = new ArrayList<>(); + Map shouldMatch1 = new HashMap<>(); + Map shouldTextEntry1 = new HashMap<>(); + shouldTextEntry1.put("text_entry", "life"); + shouldMatch1.put("match", shouldTextEntry1); + should.add(shouldMatch1); + + Map shouldMatch2 = new HashMap<>(); + Map shouldTextEntry2 = new HashMap<>(); + shouldTextEntry2.put("text_entry", "grace"); + shouldMatch2.put("match", shouldTextEntry2); + should.add(shouldMatch2); + + bool.put("should", should); + bool.put("minimum_should_match", 1); + + Map filter = new HashMap<>(); + Map term = new HashMap<>(); + term.put("play_name", "Romeo and Juliet"); + filter.put("term", term); + bool.put("filter", filter); + + template.put("bool", bool); + + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + Map contextVariables = new HashMap<>(); + contextVariables.put("keyword", "love"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + BoolQueryBuilder expectedQuery = new BoolQueryBuilder().must(new MatchQueryBuilder("text_entry", "love")) + .should(new MatchQueryBuilder("text_entry", "life")) + .should(new MatchQueryBuilder("text_entry", "grace")) + .filter(new TermQueryBuilder("play_name", "Romeo and Juliet")) + .minimumShouldMatch(1); + + QueryBuilder newQuery = templateQueryBuilder.doRewrite(queryRewriteContext); + assertEquals(expectedQuery, newQuery); + assertEquals( + "{\n" + + " \"bool\" : {\n" + + " \"must\" : [\n" + + " {\n" + + " \"match\" : {\n" + + " \"text_entry\" : {\n" + + " \"query\" : \"love\",\n" + + " \"operator\" : \"OR\",\n" + + " \"prefix_length\" : 0,\n" + + " \"max_expansions\" : 50,\n" + + " \"fuzzy_transpositions\" : true,\n" + + " \"lenient\" : false,\n" + + " \"zero_terms_query\" : \"NONE\",\n" + + " \"auto_generate_synonyms_phrase_query\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"filter\" : [\n" + + " {\n" + + " \"term\" : {\n" + + " \"play_name\" : {\n" + + " \"value\" : \"Romeo and Juliet\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"should\" : [\n" + + " {\n" + + " \"match\" : {\n" + + " \"text_entry\" : {\n" + + " \"query\" : \"life\",\n" + + " \"operator\" : \"OR\",\n" + + " \"prefix_length\" : 0,\n" + + " \"max_expansions\" : 50,\n" + + " \"fuzzy_transpositions\" : true,\n" + + " \"lenient\" : false,\n" + + " \"zero_terms_query\" : \"NONE\",\n" + + " \"auto_generate_synonyms_phrase_query\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " },\n" + + " {\n" + + " \"match\" : {\n" + + " \"text_entry\" : {\n" + + " \"query\" : \"grace\",\n" + + " \"operator\" : \"OR\",\n" + + " \"prefix_length\" : 0,\n" + + " \"max_expansions\" : 50,\n" + + " \"fuzzy_transpositions\" : true,\n" + + " \"lenient\" : false,\n" + + " \"zero_terms_query\" : \"NONE\",\n" + + " \"auto_generate_synonyms_phrase_query\" : true,\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + " }\n" + + " ],\n" + + " \"adjust_pure_negative\" : true,\n" + + " \"minimum_should_match\" : \"1\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + "}", + 
newQuery.toString() + ); + } + + /** + * Tests the doRewrite method with an invalid query type. + * Verifies that an IOException is thrown when an invalid query type is used. + */ + public void testDoRewriteWithInvalidQueryType() throws IOException { + Map template = new HashMap<>(); + template.put("invalid_query_type", new HashMap<>()); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + when(queryRewriteContext.getContextVariables()).thenReturn(new HashMap<>()); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Failed to rewrite template query")); + } + + /** + * Tests the doRewrite method with a malformed JSON query. + * Verifies that an IOException is thrown when the query JSON is malformed. + */ + public void testDoRewriteWithMalformedJson() throws IOException { + Map template = new HashMap<>(); + template.put("malformed_json", "{ this is not valid JSON }"); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + when(queryRewriteContext.getContextVariables()).thenReturn(new HashMap<>()); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Failed to rewrite template query")); + } + + /** + * Tests the doRewrite method with an invalid matchall query. + * Verifies that an IOException is thrown when an invalid matchall query is used. + */ + public void testDoRewriteWithInvalidMatchAllQuery() throws IOException { + Map template = new HashMap<>(); + template.put("matchall_1", new HashMap<>()); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + when(queryRewriteContext.getContextVariables()).thenReturn(new HashMap<>()); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Failed to rewrite template query")); + } + + /** + * Tests the doRewrite method with a missing required field in a query. + * Verifies that an IOException is thrown when a required field is missing. + */ + public void testDoRewriteWithMissingRequiredField() throws IOException { + Map template = new HashMap<>(); + template.put("term", "value");// Missing the required field for term query + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + when(queryRewriteContext.getContextVariables()).thenReturn(new HashMap<>()); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Failed to rewrite template query")); + } + + /** + * Tests the doRewrite method with a malformed variable substitution. + * Verifies that an IOException is thrown when a malformed variable is used. 
+ */ + public void testDoRewriteWithMalformedVariableSubstitution() throws IOException { + + Map template = new HashMap<>(); + Map terms = new HashMap<>(); + + terms.put("message", "${response}"); + template.put("terms", terms); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + contextVariables.put("response", "should be a list but this is a string"); + + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + + assertTrue(exception.getMessage().contains("Failed to rewrite template query")); + } + + /** + * Tests the doRewrite method with a variable not found. + * Verifies that an IOException is thrown when a malformed variable is used. + */ + public void testDoRewriteWithNotFoundVariableSubstitution() throws IOException { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "${response}"); + term.put("message", message); + template.put("term", term); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + contextVariables.put("response1", "foo"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Variable not found")); + } + + /** + * Tests the doRewrite method of TemplateQueryBuilder with a missing bracket variable. + * Verifies that the exception is thrown + */ + public void testDoRewriteWithMissingBracketVariable() throws IOException { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "${response"); + term.put("message", message); + template.put("term", term); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + Map contextVariables = new HashMap<>(); + contextVariables.put("response", "foo"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertTrue(exception.getMessage().contains("Unclosed variable in template")); + } + + /** + * Tests the replaceVariables method when the template is null. + * Verifies that an IllegalArgumentException is thrown with the appropriate error message. 
+ */ + + public void testReplaceVariablesWithNullTemplate() { + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder((Map) null); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + Map contextVariables = new HashMap<>(); + contextVariables.put("response", "foo"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertEquals("Template string cannot be null. A valid template must be provided.", exception.getMessage()); + } + + /** + * Tests the replaceVariables method when the template is empty. + * Verifies that an IllegalArgumentException is thrown with the appropriate error message. + */ + + public void testReplaceVariablesWithEmptyTemplate() { + Map template = new HashMap<>(); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + Map contextVariables = new HashMap<>(); + contextVariables.put("response", "foo"); + when(queryRewriteContext.getContextVariables()).thenReturn(contextVariables); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> templateQueryBuilder.doRewrite(queryRewriteContext) + ); + assertEquals("Template string cannot be empty. A valid template must be provided.", exception.getMessage()); + + } + + /** + * Tests the replaceVariables method when the variables map is null. + * Verifies that the method returns the original template unchanged, + * since a null variables map is treated as no replacement. + */ + public void testReplaceVariablesWithNullVariables() throws IOException { + + Map template = new HashMap<>(); + Map term = new HashMap<>(); + Map message = new HashMap<>(); + + message.put("value", "foo"); + term.put("message", message); + template.put("term", term); + TemplateQueryBuilder templateQueryBuilder = new TemplateQueryBuilder(template); + TermQueryBuilder termQueryBuilder = new TermQueryBuilder("message", "foo"); + + QueryCoordinatorContext queryRewriteContext = mockQueryRewriteContext(); + + when(queryRewriteContext.getContextVariables()).thenReturn(null); + + TermQueryBuilder newQuery = (TermQueryBuilder) templateQueryBuilder.doRewrite(queryRewriteContext); + + assertEquals(newQuery, termQueryBuilder); + assertEquals( + "{\n" + + " \"term\" : {\n" + + " \"message\" : {\n" + + " \"value\" : \"foo\",\n" + + " \"boost\" : 1.0\n" + + " }\n" + + " }\n" + + "}", + newQuery.toString() + ); + } + + /** + * Helper method to create a mock QueryCoordinatorContext for testing. 
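     * The mock completes anything passed to registerAsyncAction immediately and answers
     * getXContentRegistry() with a registry built from SearchModule, which is the minimum
     * doRewrite needs to parse the substituted query string back into a QueryBuilder.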
+ */ + private QueryCoordinatorContext mockQueryRewriteContext() { + QueryCoordinatorContext queryRewriteContext = mock(QueryCoordinatorContext.class); + final CountDownLatch inProgressLatch = new CountDownLatch(1); + doAnswer(invocation -> { + BiConsumer> biConsumer = invocation.getArgument(0); + biConsumer.accept( + null, + ActionListener.wrap( + response -> inProgressLatch.countDown(), + err -> fail("Failed to set query tokens supplier: " + err.getMessage()) + ) + ); + return null; + }).when(queryRewriteContext).registerAsyncAction(any()); + + NamedXContentRegistry TEST_XCONTENT_REGISTRY_FOR_QUERY = new NamedXContentRegistry( + new SearchModule(Settings.EMPTY, List.of()).getNamedXContents() + ); + when(queryRewriteContext.getXContentRegistry()).thenReturn(TEST_XCONTENT_REGISTRY_FOR_QUERY); + + return queryRewriteContext; + } +} diff --git a/server/src/test/java/org/opensearch/search/SearchModuleTests.java b/server/src/test/java/org/opensearch/search/SearchModuleTests.java index 9e8e7afe332f1..d78393e917b2f 100644 --- a/server/src/test/java/org/opensearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/opensearch/search/SearchModuleTests.java @@ -608,7 +608,8 @@ public Optional create(IndexSettings indexSettin "terms_set", "wildcard", "wrapper", - "distance_feature" }; + "distance_feature", + "template" }; // add here deprecated queries to make sure we log a deprecation warnings when they are used private static final String[] DEPRECATED_QUERIES = new String[] { "common", "field_masking_span" }; diff --git a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java index c930d27b068f8..a5724d3c34352 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/AggregatorFactoriesTests.java @@ -45,6 +45,7 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.Environment; +import org.opensearch.index.query.BaseQueryRewriteContext; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryRewriteContext; @@ -255,7 +256,7 @@ public void testRewriteAggregation() throws Exception { BucketScriptPipelineAggregationBuilder pipelineAgg = new BucketScriptPipelineAggregationBuilder("const", new Script("1")); AggregatorFactories.Builder builder = new AggregatorFactories.Builder().addAggregator(filterAggBuilder) .addPipelineAggregator(pipelineAgg); - AggregatorFactories.Builder rewritten = builder.rewrite(new QueryRewriteContext(xContentRegistry, null, null, () -> 0L)); + AggregatorFactories.Builder rewritten = builder.rewrite(new BaseQueryRewriteContext(xContentRegistry, null, null, () -> 0L)); assertNotSame(builder, rewritten); Collection aggregatorFactories = rewritten.getAggregatorFactories(); assertEquals(1, aggregatorFactories.size()); @@ -268,7 +269,9 @@ public void testRewriteAggregation() throws Exception { assertThat(rewrittenFilter, instanceOf(TermsQueryBuilder.class)); // Check that a further rewrite returns the same aggregation factories builder - AggregatorFactories.Builder secondRewritten = rewritten.rewrite(new QueryRewriteContext(xContentRegistry, null, null, () -> 0L)); + AggregatorFactories.Builder secondRewritten = rewritten.rewrite( + new BaseQueryRewriteContext(xContentRegistry, null, null, () 
-> 0L) + ); assertSame(rewritten, secondRewritten); } @@ -277,7 +280,7 @@ public void testRewritePipelineAggregationUnderAggregation() throws Exception { new RewrittenPipelineAggregationBuilder() ); AggregatorFactories.Builder builder = new AggregatorFactories.Builder().addAggregator(filterAggBuilder); - QueryRewriteContext context = new QueryRewriteContext(xContentRegistry, null, null, () -> 0L); + QueryRewriteContext context = new BaseQueryRewriteContext(xContentRegistry, null, null, () -> 0L); AggregatorFactories.Builder rewritten = builder.rewrite(context); CountDownLatch latch = new CountDownLatch(1); context.executeAsyncActions(new ActionListener() { @@ -304,7 +307,7 @@ public void testRewriteAggregationAtTopLevel() throws Exception { FilterAggregationBuilder filterAggBuilder = new FilterAggregationBuilder("titles", new MatchAllQueryBuilder()); AggregatorFactories.Builder builder = new AggregatorFactories.Builder().addAggregator(filterAggBuilder) .addPipelineAggregator(new RewrittenPipelineAggregationBuilder()); - QueryRewriteContext context = new QueryRewriteContext(xContentRegistry, null, null, () -> 0L); + QueryRewriteContext context = new BaseQueryRewriteContext(xContentRegistry, null, null, () -> 0L); AggregatorFactories.Builder rewritten = builder.rewrite(context); CountDownLatch latch = new CountDownLatch(1); context.executeAsyncActions(new ActionListener() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java index 56f7f450dbdfb..770f18f781689 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/FiltersTests.java @@ -36,12 +36,12 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.BaseQueryRewriteContext; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.search.aggregations.AggregationBuilder; import org.opensearch.search.aggregations.BaseAggregationTestCase; import org.opensearch.search.aggregations.bucket.filter.FiltersAggregationBuilder; @@ -147,12 +147,12 @@ public void testRewrite() throws IOException { // test non-keyed filter that doesn't rewrite AggregationBuilder original = new FiltersAggregationBuilder("my-agg", new MatchAllQueryBuilder()); original.setMetadata(Collections.singletonMap(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20))); - AggregationBuilder rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + AggregationBuilder rewritten = original.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertSame(original, rewritten); // test non-keyed filter that does rewrite original = new FiltersAggregationBuilder("my-agg", new BoolQueryBuilder()); - rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + rewritten = original.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertNotSame(original, rewritten); assertThat(rewritten, 
instanceOf(FiltersAggregationBuilder.class)); assertEquals("my-agg", ((FiltersAggregationBuilder) rewritten).getName()); @@ -163,12 +163,12 @@ public void testRewrite() throws IOException { // test keyed filter that doesn't rewrite original = new FiltersAggregationBuilder("my-agg", new KeyedFilter("my-filter", new MatchAllQueryBuilder())); - rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + rewritten = original.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertSame(original, rewritten); // test non-keyed filter that does rewrite original = new FiltersAggregationBuilder("my-agg", new KeyedFilter("my-filter", new BoolQueryBuilder())); - rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + rewritten = original.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertNotSame(original, rewritten); assertThat(rewritten, instanceOf(FiltersAggregationBuilder.class)); assertEquals("my-agg", ((FiltersAggregationBuilder) rewritten).getName()); @@ -180,7 +180,7 @@ public void testRewrite() throws IOException { // test sub-agg filter that does rewrite original = new TermsAggregationBuilder("terms").userValueTypeHint(ValueType.BOOLEAN) .subAggregation(new FiltersAggregationBuilder("my-agg", new KeyedFilter("my-filter", new BoolQueryBuilder()))); - rewritten = original.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + rewritten = original.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertNotSame(original, rewritten); assertNotEquals(original, rewritten); assertThat(rewritten, instanceOf(TermsAggregationBuilder.class)); @@ -189,7 +189,7 @@ public void testRewrite() throws IOException { assertThat(subAgg, instanceOf(FiltersAggregationBuilder.class)); assertNotSame(original.getSubAggregations().iterator().next(), subAgg); assertEquals("my-agg", subAgg.getName()); - assertSame(rewritten, rewritten.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L))); + assertSame(rewritten, rewritten.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L))); } public void testRewritePreservesOtherBucket() throws IOException { @@ -197,7 +197,7 @@ public void testRewritePreservesOtherBucket() throws IOException { originalFilters.otherBucket(randomBoolean()); originalFilters.otherBucketKey(randomAlphaOfLength(10)); - AggregationBuilder rewritten = originalFilters.rewrite(new QueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); + AggregationBuilder rewritten = originalFilters.rewrite(new BaseQueryRewriteContext(xContentRegistry(), null, null, () -> 0L)); assertThat(rewritten, instanceOf(FiltersAggregationBuilder.class)); FiltersAggregationBuilder rewrittenFilters = (FiltersAggregationBuilder) rewritten; diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index 90962a5c613f1..4ee1ee61d9586 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -47,10 +47,10 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.query.BaseQueryRewriteContext; import 
org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.MatchNoneQueryBuilder; import org.opensearch.index.query.QueryBuilders; -import org.opensearch.index.query.QueryRewriteContext; import org.opensearch.index.query.RandomQueryBuilder; import org.opensearch.index.query.Rewriteable; import org.opensearch.script.Script; @@ -737,7 +737,7 @@ private void assertIndicesBoostParseErrorMessage(String restContent, String expe private SearchSourceBuilder rewrite(SearchSourceBuilder searchSourceBuilder) throws IOException { return Rewriteable.rewrite( searchSourceBuilder, - new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, Long.valueOf(1)::longValue) + new BaseQueryRewriteContext(xContentRegistry(), writableRegistry(), null, Long.valueOf(1)::longValue) ); } } From 931c1aadaed0d8eccee0839d74d706fb47dbd4be Mon Sep 17 00:00:00 2001 From: Ganesh Krishna Ramadurai Date: Thu, 23 Jan 2025 18:04:57 -0800 Subject: [PATCH 06/48] Propagate includes and excludes from fetchSourceContext to FieldsVisitor (#17080) Signed-off-by: Ganesh Ramadurai Co-authored-by: Ganesh Ramadurai --- CHANGELOG.md | 1 + .../fieldvisitor/CustomFieldsVisitor.java | 5 ++ .../index/fieldvisitor/FieldsVisitor.java | 31 ++++++++- .../opensearch/search/fetch/FetchPhase.java | 21 ++++-- .../mapper/StoredNumericValuesTests.java | 34 ++++++++++ .../search/fetch/FetchPhaseTests.java | 64 +++++++++++++++++++ 6 files changed, 151 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 507d9906ca5e1..63ccb634a137b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Improve flat_object field parsing performance by reducing two passes to a single pass ([#16297](https://github.com/opensearch-project/OpenSearch/pull/16297)) - Improve performance of the bitmap filtering([#16936](https://github.com/opensearch-project/OpenSearch/pull/16936/)) - Introduce Template query ([#16818](https://github.com/opensearch-project/OpenSearch/pull/16818)) +- Propagate the sourceIncludes and excludes fields from fetchSourceContext to FieldsVisitor. 
([#17080](https://github.com/opensearch-project/OpenSearch/pull/17080))
 
 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
diff --git a/server/src/main/java/org/opensearch/index/fieldvisitor/CustomFieldsVisitor.java b/server/src/main/java/org/opensearch/index/fieldvisitor/CustomFieldsVisitor.java
index df4d398b2b181..8e6799f6bf74c 100644
--- a/server/src/main/java/org/opensearch/index/fieldvisitor/CustomFieldsVisitor.java
+++ b/server/src/main/java/org/opensearch/index/fieldvisitor/CustomFieldsVisitor.java
@@ -52,6 +52,11 @@ public CustomFieldsVisitor(Set<String> fields, boolean loadSource) {
         this.fields = fields;
     }
 
+    public CustomFieldsVisitor(Set<String> fields, boolean loadSource, String[] includes, String[] excludes) {
+        super(loadSource, includes, excludes);
+        this.fields = fields;
+    }
+
     @Override
     public Status needsField(FieldInfo fieldInfo) {
         if (super.needsField(fieldInfo) == Status.YES) {
diff --git a/server/src/main/java/org/opensearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/opensearch/index/fieldvisitor/FieldsVisitor.java
index 91ca07d753cc6..92328745f020e 100644
--- a/server/src/main/java/org/opensearch/index/fieldvisitor/FieldsVisitor.java
+++ b/server/src/main/java/org/opensearch/index/fieldvisitor/FieldsVisitor.java
@@ -34,6 +34,7 @@
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.util.BytesRef;
+import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.index.mapper.IdFieldMapper;
@@ -66,17 +67,29 @@ public class FieldsVisitor extends StoredFieldVisitor {
     private final boolean loadSource;
     private final String sourceFieldName;
     private final Set<String> requiredFields;
+    private final String[] sourceIncludes;
+    private final String[] sourceExcludes;
     protected BytesReference source;
     protected String id;
     protected Map<String, List<Object>> fieldsValues;
 
     public FieldsVisitor(boolean loadSource) {
-        this(loadSource, SourceFieldMapper.NAME);
+        this(loadSource, SourceFieldMapper.NAME, null, null);
+    }
+
+    public FieldsVisitor(boolean loadSource, String[] includes, String[] excludes) {
+        this(loadSource, SourceFieldMapper.NAME, includes, excludes);
     }
 
     public FieldsVisitor(boolean loadSource, String sourceFieldName) {
+        this(loadSource, sourceFieldName, null, null);
+    }
+
+    public FieldsVisitor(boolean loadSource, String sourceFieldName, String[] includes, String[] excludes) {
         this.loadSource = loadSource;
         this.sourceFieldName = sourceFieldName;
+        this.sourceIncludes = includes != null ? includes : Strings.EMPTY_ARRAY;
+        this.sourceExcludes = excludes != null ?
excludes : Strings.EMPTY_ARRAY; requiredFields = new HashSet<>(); reset(); } @@ -162,6 +175,22 @@ public BytesReference source() { return source; } + /** + * Returns the array containing the source fields to include + * @return String[] sourceIncludes + */ + public String[] includes() { + return sourceIncludes; + } + + /** + * Returns the array containing the source fields to exclude + * @return String[] sourceExcludes + */ + public String[] excludes() { + return sourceExcludes; + } + public String id() { return id; } diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index 11a1b9a97235b..df37b7dbfda98 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -221,7 +221,7 @@ public int compareTo(DocIdToIndex o) { } } - private FieldsVisitor createStoredFieldsVisitor(SearchContext context, Map> storedToRequestedFields) { + protected FieldsVisitor createStoredFieldsVisitor(SearchContext context, Map> storedToRequestedFields) { StoredFieldsContext storedFieldsContext = context.storedFieldsContext(); if (storedFieldsContext == null) { @@ -230,7 +230,11 @@ private FieldsVisitor createStoredFieldsVisitor(SearchContext context, Map fieldNames = Sets.newHashSet( + "field1", + "field2", + "field3", + "field4", + "field5", + "field6", + "field7", + "field8", + "field9", + "field10", + "field11" + ); + String[] includes = { "field1", "field2", "field3" }; + String[] excludes = { "field7", "field8" }; + + CustomFieldsVisitor fieldsVisitor = new CustomFieldsVisitor(fieldNames, false, includes, excludes); + + assertArrayEquals(fieldsVisitor.includes(), includes); + assertArrayEquals(fieldsVisitor.excludes(), excludes); + + FieldsVisitor fieldsVisitor1 = new FieldsVisitor(false, includes, excludes); + assertArrayEquals(fieldsVisitor1.includes(), includes); + assertArrayEquals(fieldsVisitor1.excludes(), excludes); + + FieldsVisitor fieldsVisitor2 = new FieldsVisitor(false); + assertArrayEquals(fieldsVisitor2.includes(), Strings.EMPTY_ARRAY); + assertArrayEquals(fieldsVisitor2.excludes(), Strings.EMPTY_ARRAY); + + } } diff --git a/server/src/test/java/org/opensearch/search/fetch/FetchPhaseTests.java b/server/src/test/java/org/opensearch/search/fetch/FetchPhaseTests.java index a4820c6cff003..4f03dfd47c512 100644 --- a/server/src/test/java/org/opensearch/search/fetch/FetchPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/fetch/FetchPhaseTests.java @@ -32,8 +32,22 @@ package org.opensearch.search.fetch; +import org.opensearch.index.fieldvisitor.CustomFieldsVisitor; +import org.opensearch.index.fieldvisitor.FieldsVisitor; +import org.opensearch.search.fetch.subphase.FetchSourceContext; +import org.opensearch.search.internal.SearchContext; import org.opensearch.test.OpenSearchTestCase; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class FetchPhaseTests extends OpenSearchTestCase { public void testSequentialDocs() { FetchPhase.DocIdToIndex[] docs = new FetchPhase.DocIdToIndex[10]; @@ -52,4 +66,54 @@ public void testSequentialDocs() { } assertFalse(FetchPhase.hasSequentialDocs(docs)); } + + public void testFieldsVisitorsInFetchPhase() { + + FetchPhase fetchPhase = new FetchPhase(new ArrayList<>()); + 
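        // The includes/excludes exercised below correspond to a request-level source filter,
        // i.e. a search body along the lines of (illustrative JSON, not part of this test):
        //   "_source": { "includes": ["field1", "field2"], "excludes": ["field7", "field8"] }
        // which FetchPhase now forwards from the FetchSourceContext into the FieldsVisitor.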
SearchContext mockSearchContext = mock(SearchContext.class); + when(mockSearchContext.docIdsToLoadSize()).thenReturn(1); + when(mockSearchContext.docIdsToLoad()).thenReturn(new int[] { 1 }); + String[] includes = new String[] { "field1", "field2" }; + String[] excludes = new String[] { "field7", "field8" }; + + FetchSourceContext mockFetchSourceContext = new FetchSourceContext(true, includes, excludes); + when(mockSearchContext.hasFetchSourceContext()).thenReturn(true); + when(mockSearchContext.fetchSourceContext()).thenReturn(mockFetchSourceContext); + + // Case 1 + // if storedFieldsContext is null + FieldsVisitor fieldsVisitor = fetchPhase.createStoredFieldsVisitor(mockSearchContext, null); + assertArrayEquals(fieldsVisitor.excludes(), excludes); + assertArrayEquals(fieldsVisitor.includes(), includes); + + // Case 2 + // if storedFieldsContext is not null + StoredFieldsContext storedFieldsContext = mock(StoredFieldsContext.class); + when(mockSearchContext.storedFieldsContext()).thenReturn(storedFieldsContext); + + fieldsVisitor = fetchPhase.createStoredFieldsVisitor(mockSearchContext, null); + assertNull(fieldsVisitor); + + // Case 3 + // if storedFieldsContext is true but fieldNames are empty + when(storedFieldsContext.fetchFields()).thenReturn(true); + when(storedFieldsContext.fieldNames()).thenReturn(List.of()); + fieldsVisitor = fetchPhase.createStoredFieldsVisitor(mockSearchContext, Collections.emptyMap()); + assertArrayEquals(fieldsVisitor.excludes(), excludes); + assertArrayEquals(fieldsVisitor.includes(), includes); + + // Case 4 + // if storedToRequested Fields is not empty + // creates an instance of CustomFieldsVisitor + Map> storedToRequestedFields = new HashMap<>(); + storedToRequestedFields.put("test_field_key", Set.of("test_field_value")); + + fieldsVisitor = fetchPhase.createStoredFieldsVisitor(mockSearchContext, storedToRequestedFields); + + assertTrue(fieldsVisitor instanceof CustomFieldsVisitor); + assertArrayEquals(fieldsVisitor.excludes(), excludes); + assertArrayEquals(fieldsVisitor.includes(), includes); + + } + } From bc9e4d8baa6458b83cee220ff08018cebd6028f5 Mon Sep 17 00:00:00 2001 From: panguixin Date: Sat, 25 Jan 2025 02:13:14 +0800 Subject: [PATCH 07/48] Fix exists query for flat object (#17108) --------- Signed-off-by: panguixin --- .../test/index/100_partial_flat_object.yml | 33 ++++++++------- .../index/105_partial_flat_object_nested.yml | 41 +++++++++++-------- .../test/index/91_flat_object_null_value.yml | 2 +- .../index/mapper/FlatObjectFieldMapper.java | 3 +- .../mapper/FlatObjectFieldTypeTests.java | 12 ++++-- 5 files changed, 53 insertions(+), 38 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml index 6fc2654bcfc8f..0c00ff6e4a6a8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml @@ -506,20 +506,6 @@ teardown: - length: { hits.hits: 2 } - # Exists Query with nested dot path, use the flat_object_field_name.last_key - - do: - search: - body: { - _source: true, - query: { - "exists": { - "field": issue.labels.type - } - } - } - - - length: { hits.hits: 3 } - # Exists Query without dot path for the flat_object_field_name - do: search: @@ -613,3 +599,22 @@ teardown: - length: { hits.hits: 1 } - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ 
"John","Approved" ] ] } + +--- +"Exists query for sub field": + - skip: + version: " - 2.99.99" + reason: "exists query for sub field of flat_object field has bug before 3.0.0" + + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": issue.labels.category.type + } + } + } + + - length: { hits.hits: 3 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml index 549ddbdde7bab..ccb9c3e3fc23d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml @@ -506,23 +506,6 @@ teardown: - length: { hits.hits: 2 } - # Exists Query with nested dot path, use the flat_object_field_name.last_key - - do: - search: - body: { - _source: true, - query: { - nested: { - path: "issue", - query: { - "exists": { - "field": issue.labels.type - } } } - } - } - - - length: { hits.hits: 2 } - # Exists Query without dot path for the flat_object_field_name - do: search: @@ -634,3 +617,27 @@ teardown: - length: { hits.hits: 1 } - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + +--- +"Exists query for sub field": + - skip: + version: " - 2.99.99" + reason: "exists query for sub field of flat_object field has bug before 3.0.0" + + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "exists": { + "field": issue.labels.category.type + } + } + } + } + } + + - length: { hits.hits: 2 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml index 716b6fb51cb43..455eb4fb91a43 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/91_flat_object_null_value.yml @@ -373,7 +373,7 @@ teardown: body: { _source: true, query: { - exists: { "field": "record.d" } + exists: { "field": "record.name.d.name" } }, sort: [{ order: asc}] } diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 4425e4e5b0b39..7b1b2615f996d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -478,8 +478,7 @@ public Query existsQuery(QueryShardContext context) { String searchKey; String searchField; if (isSubField()) { - searchKey = this.rootFieldName; - searchField = name(); + return rangeQuery(null, null, true, true, context); } else { if (hasDocValues()) { return new FieldExistsQuery(name()); diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index 600f04edcd552..2ab1ad689a0b9 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -418,8 +418,12 @@ public void testExistsQuery() { ft.getValueFieldType(), ft.getValueAndPathFieldType() ); - assertEquals(new TermQuery(new Term("field", "field.bar")), 
dynamicMappedFieldType.existsQuery(null)); - + Automaton termAutomaton = PrefixQuery.toAutomaton(new BytesRef("field.bar=")); + Automaton dvAutomaton = PrefixQuery.toAutomaton(new BytesRef("field.field.bar=")); + Query indexQuery = new AutomatonQuery(new Term("field" + VALUE_AND_PATH_SUFFIX), termAutomaton, true); + Query dvQuery = new AutomatonQuery(new Term("field" + VALUE_AND_PATH_SUFFIX), dvAutomaton, true, DOC_VALUES_REWRITE); + Query expected = new IndexOrDocValuesQuery(indexQuery, dvQuery); + assertEquals(expected, dynamicMappedFieldType.existsQuery(MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); } { @@ -1176,8 +1180,8 @@ public void testRangeQuery() { ); continue; } - boolean nullLowerTerm = true;// randomBoolean(); - boolean nullUpperTerm = true;// nullLowerTerm == false || randomBoolean(); + boolean nullLowerTerm = randomBoolean(); + boolean nullUpperTerm = nullLowerTerm == false || randomBoolean(); Automaton a1 = PrefixQuery.toAutomaton(new BytesRef("field.field1=")); Automaton a2 = TermRangeQuery.toAutomaton( From 822f80cf2cdc8398d6861827bca2d53f91329f8d Mon Sep 17 00:00:00 2001 From: Tommy Shao <69884021+anntians@users.noreply.github.com> Date: Fri, 24 Jan 2025 11:03:57 -0800 Subject: [PATCH 08/48] Added new Setting property `UnmodifiableOnRestore` to prevent updating settings on restore snapshot (#16957) * Add index.knn setting to list of unmodifiable settings when restore snapshot Signed-off-by: AnnTian Shao * Add index.knn setting to list of unmodifiable settings when restore snapshot Signed-off-by: AnnTian Shao * Add new Setting property UnmodifiableOnRestore to mark setting as immutable on restore snapshot Signed-off-by: AnnTian Shao * Add tests for new Setting property UnmodifiableOnRestore Signed-off-by: AnnTian Shao * fixes and added more tests for new setting property UnmodifiableOnRestore Signed-off-by: AnnTian Shao * fix test failures Signed-off-by: AnnTian Shao * Revert "fix test failures" This reverts commit 252100cb1174316198044e93647c544aac1e4394. Signed-off-by: AnnTian Shao * fixes by adding back USER_UNMODIFIABLE_SETTINGS for settings without Setting implementation Signed-off-by: AnnTian Shao * testing CI config for registering plugin settings Signed-off-by: AnnTian Shao * Revert "testing CI config for registering plugin settings" This reverts commit 9ebab5a7ac9fa3a15b436168f227556bf18ace87. Signed-off-by: AnnTian Shao * Add UnmodifiableOnRestore only to unmodifiable settings that are already registered, will raise separate PR for settings not yet registered. Add validation check in Setting.java. 
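(Condensed from the Setting.java hunk later in this patch, the constructor-time validation amounts to the following; `validateProperties` is an illustrative standalone rendering for readability, not the actual method name:)

    import java.util.Arrays;
    import java.util.EnumSet;

    import org.opensearch.common.settings.Setting.Property;

    final class UnmodifiableOnRestoreChecks {
        // Sketch of the checks Setting's constructor performs on its property set.
        static void validateProperties(String key, Property... properties) {
            if (properties.length == 0) {
                return; // nothing to validate
            }
            final EnumSet<Property> props = EnumSet.copyOf(Arrays.asList(properties));
            if (props.contains(Property.UnmodifiableOnRestore) && props.contains(Property.Dynamic)) {
                // a setting frozen on restore cannot also be updatable at runtime
                throw new IllegalArgumentException("UnmodifiableOnRestore setting [" + key + "] cannot be dynamic");
            }
            if (props.contains(Property.UnmodifiableOnRestore) && props.contains(Property.IndexScope) == false) {
                // mirrors checkPropertyRequiresIndexScope: the property applies only to index-scoped settings
                throw new IllegalArgumentException(
                    "non-index-scoped setting [" + key + "] can not have property [UnmodifiableOnRestore]"
                );
            }
        }
    }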
Add UnmodifiableOnRestore settings cannot be removed on restore

Signed-off-by: AnnTian Shao

* fixes and move tests to RestoreSnapshotIT

Signed-off-by: AnnTian Shao

* Add back testInvalidRestoreRequestScenarios test method

Signed-off-by: AnnTian Shao

---------

Signed-off-by: AnnTian Shao
Signed-off-by: Tommy Shao <69884021+anntians@users.noreply.github.com>
Co-authored-by: AnnTian Shao
---
 CHANGELOG.md                                  |   1 +
 .../remotestore/RemoteRestoreSnapshotIT.java  | 149 ++++++-
 .../RestoreShallowSnapshotV2IT.java           | 149 ++++++-
 .../snapshots/RestoreSnapshotIT.java          | 388 ++++++++++++++++++
 .../cluster/metadata/IndexMetadata.java       |  15 +-
 .../metadata/MetadataCreateIndexService.java  |   4 +
 .../settings/AbstractScopedSettings.java      |   8 +
 .../opensearch/common/settings/Setting.java   |  15 +-
 .../opensearch/snapshots/RestoreService.java  |  22 +-
 .../common/settings/ScopedSettingsTests.java  |  24 ++
 .../common/settings/SettingTests.java         |  16 +
 11 files changed, 766 insertions(+), 25 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 63ccb634a137b..17036473e054d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Added a new `time` field to replace the deprecated `getTime` field in `GetStats`. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009))
 - Improve flat_object field parsing performance by reducing two passes to a single pass ([#16297](https://github.com/opensearch-project/OpenSearch/pull/16297))
 - Improve performance of the bitmap filtering([#16936](https://github.com/opensearch-project/OpenSearch/pull/16936/))
+- Added new Setting property UnmodifiableOnRestore to prevent updating settings on restore snapshot ([#16957](https://github.com/opensearch-project/OpenSearch/pull/16957))
 - Introduce Template query ([#16818](https://github.com/opensearch-project/OpenSearch/pull/16818))
 - Propagate the sourceIncludes and excludes fields from fetchSourceContext to FieldsVisitor.
([#17080](https://github.com/opensearch-project/OpenSearch/pull/17080)) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 70e283791fc3e..3b96636cfe771 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -72,7 +72,9 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; @@ -494,6 +496,51 @@ public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, Exe assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); } + public void testSuccessfulIndexRestoredFromSnapshotWithUpdatedSetting() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + // try index restore with index.number_of_replicas setting modified. 
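    // (index.number_of_replicas is listed in USER_UNREMOVABLE_SETTINGS, so it cannot be dropped
    // via ignoreIndexSettings, but it is not in USER_UNMODIFIABLE_SETTINGS, so overriding it via
    // setIndexSettings as done below is allowed; see the RestoreService hunk later in this patch.)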
index.number_of_replicas can be modified on restore + Settings numberOfReplicasSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(numberOfReplicasSettings) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + protected IndexShard getIndexShard(String node, String indexName) { final Index index = resolveIndex(indexName); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); @@ -706,7 +753,7 @@ public void testInvalidRestoreRequestScenarios() throws Exception { indexDocuments(client, index, numDocsInIndex, numDocsInIndex + randomIntBetween(2, 5)); ensureGreen(index); - // try index restore with remote store disabled + // try index restore with index.remote_store.enabled ignored SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -721,26 +768,37 @@ public void testInvalidRestoreRequestScenarios() throws Exception { ); assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.enabled] on restore")); - // try index restore with remote store repository modified - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) - .build(); + // try index restore with index.remote_store.segment.repository ignored + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository] on restore")); + // try index restore with index.remote_store.translog.repository ignored exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() .cluster() .prepareRestoreSnapshot(snapshotRepo, snapshotName1) .setWaitForCompletion(false) - .setIndexSettings(remoteStoreIndexSettings) + .setIgnoreIndexSettings(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY) .setIndices(index) .setRenamePattern(index) .setRenameReplacement(restoredIndex) .get() ); - assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.translog.repository] on restore")); - // try index restore with remote store repository and translog store repository disabled + // try index restore with index.remote_store.segment.repository and index.remote_store.translog.repository ignored exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -757,6 +815,81 @@ public void testInvalidRestoreRequestScenarios() throws Exception { .get() ); assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with index.remote_store.enabled modified + Settings remoteStoreIndexSettings = 
Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false).build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + // try index restore with index.remote_store.segment.repository modified + Settings remoteStoreSegmentIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreSegmentIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with index.remote_store.translog.repository modified + Settings remoteStoreTranslogIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreTranslogIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.translog.repository]" + " on restore")); + + // try index restore with index.remote_store.translog.repository and index.remote_store.segment.repository modified + Settings multipleRemoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(multipleRemoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); } public void testCreateSnapshotV2_Orphan_Timestamp_Cleanup() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java index 1493a1b259e13..19953b147bdf9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RestoreShallowSnapshotV2IT.java @@ -67,7 +67,9 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; @@ -561,6 +563,51 @@ public void testRemoteRestoreIndexRestoredFromSnapshot() throws IOException, Exe assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); } + public void testSuccessfulIndexRestoredFromSnapshotWithUpdatedSetting() throws IOException, ExecutionException, InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + final int numDocsInIndex1 = randomIntBetween(20, 30); + indexDocuments(client(), indexName1, numDocsInIndex1); + flushAndRefresh(indexName1); + ensureGreen(indexName1); + + logger.info("--> snapshot"); + SnapshotInfo snapshotInfo1 = createSnapshot(snapshotRepoName, snapshotName1, new ArrayList<>(Arrays.asList(indexName1))); + assertThat(snapshotInfo1.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo1.successfulShards(), equalTo(snapshotInfo1.totalShards())); + assertThat(snapshotInfo1.state(), equalTo(SnapshotState.SUCCESS)); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest(indexName1)).get()); + assertFalse(indexExists(indexName1)); + + // try index restore with index.number_of_replicas setting modified. 
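    // (same reasoning as in RemoteRestoreSnapshotIT above: number_of_replicas may be overridden
    // on restore, it just cannot be removed.)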
index.number_of_replicas can be modified on restore + Settings numberOfReplicasSettings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(numberOfReplicasSettings) + .setIndices(indexName1) + .get(); + + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + } + private IndexShard getIndexShard(String node, String indexName) { final Index index = resolveIndex(indexName); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, node); @@ -773,7 +820,7 @@ public void testInvalidRestoreRequestScenarios() throws Exception { indexDocuments(client, index, numDocsInIndex, numDocsInIndex + randomIntBetween(2, 5)); ensureGreen(index); - // try index restore with remote store disabled + // try index restore with index.remote_store.enabled ignored SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -788,26 +835,37 @@ public void testInvalidRestoreRequestScenarios() throws Exception { ); assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.enabled] on restore")); - // try index restore with remote store repository modified - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) - .build(); + // try index restore with index.remote_store.segment.repository ignored + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository] on restore")); + // try index restore with index.remote_store.translog.repository ignored exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() .cluster() .prepareRestoreSnapshot(snapshotRepo, snapshotName1) .setWaitForCompletion(false) - .setIndexSettings(remoteStoreIndexSettings) + .setIgnoreIndexSettings(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY) .setIndices(index) .setRenamePattern(index) .setRenameReplacement(restoredIndex) .get() ); - assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.translog.repository] on restore")); - // try index restore with remote store repository and translog store repository disabled + // try index restore with index.remote_store.segment.repository and index.remote_store.translog.repository ignored exception = expectThrows( SnapshotRestoreException.class, () -> client().admin() @@ -824,6 +882,81 @@ public void testInvalidRestoreRequestScenarios() throws Exception { .get() ); assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with index.remote_store.enabled modified + Settings remoteStoreIndexSettings = 
Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false).build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + // try index restore with index.remote_store.segment.repository modified + Settings remoteStoreSegmentIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreSegmentIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); + + // try index restore with index.remote_store.translog.repository modified + Settings remoteStoreTranslogIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreTranslogIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.translog.repository]" + " on restore")); + + // try index restore with index.remote_store.translog.repository and index.remote_store.segment.repository modified + Settings multipleRemoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, newRemoteStoreRepo) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, newRemoteStoreRepo) + .build(); + + exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(multipleRemoteStoreIndexSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.segment.repository]" + " on restore")); } public void testRestoreOperationsUsingDifferentRepos() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index e76587653e99a..36ab97d0b730f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -32,6 +32,7 @@ package org.opensearch.snapshots; +import org.opensearch.Version; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import 
org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -49,10 +50,12 @@ import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.repositories.RepositoriesService; import java.nio.file.Path; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -1112,4 +1115,389 @@ public void testRestoreBalancedReplica() { } } + private String index; + private String snapshotRepo; + private String snapshotName1; + private String snapshotName2; + private Path absolutePath1; + private String restoredIndex; + private Settings indexSettings; + private SnapshotInfo snapshotInfo; + private SnapshotInfo snapshotInfo2; + + public void setupSnapshotRestore() { + index = "test-index"; + snapshotRepo = "test-restore-snapshot-repo"; + snapshotName1 = "test-restore-snapshot1"; + snapshotName2 = "test-restore-snapshot2"; + absolutePath1 = randomRepoPath().toAbsolutePath(); + + logger.info("Snapshot Path [{}]", absolutePath1); + restoredIndex = index + "-restored"; + + createRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, true)); + + indexSettings = getIndexSettings(1, 0).build(); + createIndex(index, indexSettings); + ensureGreen(index); + + logger.info("--> snapshot"); + + snapshotInfo = createSnapshot(snapshotRepo, snapshotName1, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + updateRepository(snapshotRepo, "fs", getRepositorySettings(absolutePath1, false)); + snapshotInfo2 = createSnapshot(snapshotRepo, snapshotName2, new ArrayList<>(List.of(index))); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); + ensureGreen(index); + } + + public void testInvalidRestoreRequest_UserUnRemovableSettingsIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with USER_UNREMOVABLE_SETTINGS setting ignored + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.remote_store.enabled] on restore")); + + } + + public void testInvalidRestoreRequest_UnmodifiableOnRestoreIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with UnmodifiableOnRestore setting ignored + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_NUMBER_OF_SHARDS) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove UnmodifiableOnRestore 
setting [index.number_of_shards] on restore")); + + } + + public void testInvalidRestoreRequest_MixRemovableAndUnmodifiableOnRestoreIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of removable and UnmodifiableOnRestore settings ignored + // index.version.created is UnmodifiableOnRestore, index.number_of_search_only_replicas is removable + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_VERSION_CREATED, IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove UnmodifiableOnRestore setting [index.version.created] on restore")); + } + + public void testInvalidRestoreRequest_MixRemovableAndUserUnRemovableSettingsIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of removable and USER_UNREMOVABLE_SETTINGS settings ignored + // index.number_of_replicas is USER_UNREMOVABLE_SETTINGS, index.number_of_search_only_replicas is removable + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.number_of_replicas] on restore")); + + } + + public void testInvalidRestoreRequest_MixUnmodifiableOnRestoreAndUserUnRemovableSettingsIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of UnmodifiableOnRestore and USER_UNREMOVABLE_SETTINGS settings ignored + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, IndexMetadata.SETTING_NUMBER_OF_SHARDS) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.number_of_replicas]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MultipleUnmodifiableOnRestoreIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with multiple UnmodifiableOnRestore settings ignored + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_NUMBER_OF_SHARDS, IndexMetadata.SETTING_VERSION_CREATED) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove UnmodifiableOnRestore setting [index.number_of_shards]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MultipleUserUnRemovableSettingsIgnored() throws Exception { + setupSnapshotRestore(); + + // try index restore with multiple 
USER_UNREMOVABLE_SETTINGS settings ignored + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot remove setting [index.number_of_replicas]" + " on restore")); + + } + + public void testInvalidRestoreRequest_UnmodifiableOnRestoreModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with UnmodifiableOnRestore setting modified + Settings numberOfShardsSettingsDiff = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(numberOfShardsSettingsDiff) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify UnmodifiableOnRestore setting [index.number_of_shards]" + " on restore")); + + } + + public void testInvalidRestoreRequest_UnmodifiableOnRestoreUnchanged() throws Exception { + setupSnapshotRestore(); + + // try index restore with UnmodifiableOnRestore setting unchanged + Settings numberOfShardsSettingsSame = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(numberOfShardsSettingsSame) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify UnmodifiableOnRestore setting [index.number_of_shards]" + " on restore")); + + } + + public void testInvalidRestoreRequest_UserUnmodifiableSettingsModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with USER_UNMODIFIABLE_SETTINGS setting modified + Settings remoteStoreEnabledSetting = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false).build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreEnabledSetting) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MixModifiableAndUnmodifiableOnRestoreModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of modifiable and UnmodifiableOnRestore settings modified + // index.version.created is UnmodifiableOnRestore, index.number_of_search_only_replicas is modifiable + Settings mixedSettingsUnmodifiableOnRestore = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.V_EMPTY) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + 
SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(mixedSettingsUnmodifiableOnRestore) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify UnmodifiableOnRestore setting [index.version.created]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MixModifiableAndUserUnmodifiableSettingsModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of modifiable and USER_UNMODIFIABLE_SETTINGS settings modified + // index.remote_store.enabled is USER_UNMODIFIABLE_SETTINGS, index.number_of_search_only_replicas is modifiable + Settings mixedSettingsUserUnmodifiableSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(mixedSettingsUserUnmodifiableSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MixUnmodifiableOnRestoreAndUserUnmodifiableSettingsModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with mix of UnmodifiableOnRestore and USER_UNMODIFIABLE_SETTINGS settings modified + // index.remote_store.enabled is USER_UNMODIFIABLE_SETTINGS, index.version.created is UnmodifiableOnRestore + Settings mixedSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.V_EMPTY) + .build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(mixedSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MultipleUnmodifiableOnRestoreModified() throws Exception { + setupSnapshotRestore(); + + // try index restore with multiple UnmodifiableOnRestore settings modified + Settings unmodifiableOnRestoreSettings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.V_EMPTY) + .build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(unmodifiableOnRestoreSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify UnmodifiableOnRestore setting [index.number_of_shards]" + " on restore")); + + } + + public void testInvalidRestoreRequest_MultipleUserUnmodifiableSettingsModified() 
throws Exception { + setupSnapshotRestore(); + + // try index restore with multiple USER_UNMODIFIABLE_SETTINGS settings modified + Settings userUnmodifiableSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + + SnapshotRestoreException exception = expectThrows( + SnapshotRestoreException.class, + () -> client().admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepo, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(userUnmodifiableSettings) + .setIndices(index) + .setRenamePattern(index) + .setRenameReplacement(restoredIndex) + .get() + ); + assertTrue(exception.getMessage().contains("cannot modify setting [index.remote_store.enabled]" + " on restore")); + + } + + protected Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); + return settingsBuilder; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index f70282986ad4e..cee331788e4b7 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -230,7 +230,15 @@ static Setting buildNumberOfShardsSetting() { + "]" ); } - return Setting.intSetting(SETTING_NUMBER_OF_SHARDS, defaultNumShards, 1, maxNumShards, Property.IndexScope, Property.Final); + return Setting.intSetting( + SETTING_NUMBER_OF_SHARDS, + defaultNumShards, + 1, + maxNumShards, + Property.IndexScope, + Property.Final, + Property.UnmodifiableOnRestore + ); } public static final String INDEX_SETTING_PREFIX = "index."; @@ -559,13 +567,15 @@ public static APIBlock readFrom(StreamInput input) throws IOException { SETTING_VERSION_CREATED, Version.V_EMPTY, Property.IndexScope, - Property.PrivateIndex + Property.PrivateIndex, + Property.UnmodifiableOnRestore ); public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string"; public static final String SETTING_VERSION_UPGRADED = "index.version.upgraded"; public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string"; public static final String SETTING_CREATION_DATE = "index.creation_date"; + /** * The user provided name for an index. This is the plain string provided by the user when the index was created. * It might still contain date math expressions etc. 
(added in 5.0) @@ -589,6 +599,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException { Function.identity(), Property.IndexScope ); + public static final String INDEX_UUID_NA_VALUE = Strings.UNKNOWN_UUID_VALUE; public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index b5b2b71f977fa..f052e9940bb9a 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -231,6 +231,10 @@ public MetadataCreateIndexService( : null; } + public IndexScopedSettings getIndexScopedSettings() { + return indexScopedSettings; + } + /** * Add a provider to be invoked to get additional index settings prior to an index being created */ diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index 7655135b06d6c..8c10623e48fe4 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -759,6 +759,14 @@ public boolean isFinalSetting(String key) { return setting != null && setting.isFinal(); } + /** + * Returns true if the setting for the given key is unmodifiableOnRestore. Otherwise false. + */ + public boolean isUnmodifiableOnRestoreSetting(String key) { + final Setting setting = get(key); + return setting != null && setting.isUnmodifiableOnRestore(); + } + /** * Returns a settings object that contains all settings that are not * already set in the given source. The diff contains either the default value for each diff --git a/server/src/main/java/org/opensearch/common/settings/Setting.java b/server/src/main/java/org/opensearch/common/settings/Setting.java index 081029c1c106c..eb63522270e87 100644 --- a/server/src/main/java/org/opensearch/common/settings/Setting.java +++ b/server/src/main/java/org/opensearch/common/settings/Setting.java @@ -171,7 +171,13 @@ public enum Property { /** * Extension scope */ - ExtensionScope + ExtensionScope, + + /** + * Mark this setting as immutable on snapshot restore + * i.e. 
the setting will not be allowed to be removed or modified during restore + */ + UnmodifiableOnRestore } private final Key key; @@ -208,10 +214,13 @@ private Setting( final EnumSet propertiesAsSet = EnumSet.copyOf(Arrays.asList(properties)); if (propertiesAsSet.contains(Property.Dynamic) && propertiesAsSet.contains(Property.Final)) { throw new IllegalArgumentException("final setting [" + key + "] cannot be dynamic"); + } else if (propertiesAsSet.contains(Property.UnmodifiableOnRestore) && propertiesAsSet.contains(Property.Dynamic)) { + throw new IllegalArgumentException("UnmodifiableOnRestore setting [" + key + "] cannot be dynamic"); } checkPropertyRequiresIndexScope(propertiesAsSet, Property.NotCopyableOnResize); checkPropertyRequiresIndexScope(propertiesAsSet, Property.InternalIndex); checkPropertyRequiresIndexScope(propertiesAsSet, Property.PrivateIndex); + checkPropertyRequiresIndexScope(propertiesAsSet, Property.UnmodifiableOnRestore); checkPropertyRequiresNodeScope(propertiesAsSet, Property.Consistent); this.properties = propertiesAsSet; } @@ -348,6 +357,10 @@ public final boolean isFinal() { return properties.contains(Property.Final); } + public final boolean isUnmodifiableOnRestore() { + return properties.contains(Property.UnmodifiableOnRestore); + } + public final boolean isInternalIndex() { return properties.contains(Property.InternalIndex); } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 4b5bd951f80a0..29ced9d5f0f0c 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -77,6 +77,7 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.ArrayUtils; @@ -122,12 +123,10 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_CREATED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_VERSION_UPGRADED; import static org.opensearch.common.util.FeatureFlags.SEARCHABLE_SNAPSHOT_EXTENDED_COMPATIBILITY; import static org.opensearch.common.util.IndexUtils.filterIndices; @@ -164,8 +163,6 @@ public class RestoreService implements ClusterStateApplier { private static final Set USER_UNMODIFIABLE_SETTINGS = unmodifiableSet( newHashSet( - SETTING_NUMBER_OF_SHARDS, - SETTING_VERSION_CREATED, SETTING_INDEX_UUID, SETTING_CREATION_DATE, SETTING_HISTORY_UUID, @@ -180,7 +177,7 @@ public class RestoreService implements ClusterStateApplier { private static final String 
REMOTE_STORE_INDEX_SETTINGS_REGEX = "index.remote_store.*"; static { - Set unremovable = new HashSet<>(USER_UNMODIFIABLE_SETTINGS.size() + 4); + Set unremovable = new HashSet<>(USER_UNMODIFIABLE_SETTINGS.size() + 3); unremovable.addAll(USER_UNMODIFIABLE_SETTINGS); unremovable.add(SETTING_NUMBER_OF_REPLICAS); unremovable.add(SETTING_AUTO_EXPAND_REPLICAS); @@ -202,6 +199,8 @@ public class RestoreService implements ClusterStateApplier { private final ClusterSettings clusterSettings; + private final IndexScopedSettings indexScopedSettings; + private final IndicesService indicesService; private final Supplier clusterInfoSupplier; @@ -234,6 +233,7 @@ public RestoreService( this.clusterSettings = clusterService.getClusterSettings(); this.shardLimitValidator = shardLimitValidator; this.indicesService = indicesService; + this.indexScopedSettings = createIndexService.getIndexScopedSettings(); this.clusterInfoSupplier = clusterInfoSupplier; this.dataToFileCacheSizeRatioSupplier = dataToFileCacheSizeRatioSupplier; @@ -835,6 +835,11 @@ private IndexMetadata updateIndexSettings( snapshot, "cannot remove setting [" + ignoredSetting + "] on restore" ); + } else if (indexScopedSettings.isUnmodifiableOnRestoreSetting(ignoredSetting)) { + throw new SnapshotRestoreException( + snapshot, + "cannot remove UnmodifiableOnRestore setting [" + ignoredSetting + "] on restore" + ); } else { keyFilters.add(ignoredSetting); } @@ -853,7 +858,7 @@ private IndexMetadata updateIndexSettings( } Predicate settingsFilter = k -> { - if (USER_UNREMOVABLE_SETTINGS.contains(k) == false) { + if (USER_UNREMOVABLE_SETTINGS.contains(k) == false && !indexScopedSettings.isUnmodifiableOnRestoreSetting(k)) { for (String filterKey : keyFilters) { if (k.equals(filterKey)) { return false; @@ -872,6 +877,11 @@ private IndexMetadata updateIndexSettings( .put(normalizedChangeSettings.filter(k -> { if (USER_UNMODIFIABLE_SETTINGS.contains(k)) { throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + k + "] on restore"); + } else if (indexScopedSettings.isUnmodifiableOnRestoreSetting(k)) { + throw new SnapshotRestoreException( + snapshot, + "cannot modify UnmodifiableOnRestore setting [" + k + "] on restore" + ); } else { return true; } diff --git a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java index 7780481c9deff..55e3cfa34040b 100644 --- a/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ScopedSettingsTests.java @@ -789,6 +789,30 @@ public void testIsFinal() { assertTrue(settings.isFinalSetting("foo.group.key")); } + public void testIsUnmodifiableOnRestore() { + ClusterSettings settings = new ClusterSettings( + Settings.EMPTY, + new HashSet<>( + Arrays.asList( + Setting.intSetting("foo.int", 1, Property.UnmodifiableOnRestore, Property.IndexScope, Property.NodeScope), + Setting.groupSetting("foo.group.", Property.UnmodifiableOnRestore, Property.IndexScope, Property.NodeScope), + Setting.groupSetting("foo.list.", Property.UnmodifiableOnRestore, Property.IndexScope, Property.NodeScope), + Setting.intSetting("foo.int.baz", 1, Property.IndexScope, Property.NodeScope) + ) + ) + ); + + assertFalse(settings.isUnmodifiableOnRestoreSetting("foo.int.baz")); + assertTrue(settings.isUnmodifiableOnRestoreSetting("foo.int")); + + assertFalse(settings.isUnmodifiableOnRestoreSetting("foo.list")); + 
assertTrue(settings.isUnmodifiableOnRestoreSetting("foo.list.0.key")); + assertTrue(settings.isUnmodifiableOnRestoreSetting("foo.list.key")); + + assertFalse(settings.isUnmodifiableOnRestoreSetting("foo.group")); + assertTrue(settings.isUnmodifiableOnRestoreSetting("foo.group.key")); + } + public void testDiff() throws IOException { Setting fooBarBaz = Setting.intSetting("foo.bar.baz", 1, Property.NodeScope); Setting fooBar = Setting.intSetting("foo.bar", 1, Property.Dynamic, Property.NodeScope); diff --git a/server/src/test/java/org/opensearch/common/settings/SettingTests.java b/server/src/test/java/org/opensearch/common/settings/SettingTests.java index c3c399a9d88b2..a0788b0c83e11 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingTests.java @@ -1439,6 +1439,22 @@ public void testRejectConflictingDynamicAndFinalProperties() { assertThat(ex.getMessage(), containsString("final setting [foo.bar] cannot be dynamic")); } + public void testRejectConflictingDynamicAndUnmodifiableOnRestoreProperties() { + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> Setting.simpleString("foo.bar", Property.UnmodifiableOnRestore, Property.Dynamic) + ); + assertThat(ex.getMessage(), containsString("UnmodifiableOnRestore setting [foo.bar] cannot be dynamic")); + } + + public void testRejectNonIndexScopedUnmodifiableOnRestoreSetting() { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> Setting.simpleString("foo.bar", Property.UnmodifiableOnRestore) + ); + assertThat(e, hasToString(containsString("non-index-scoped setting [foo.bar] can not have property [UnmodifiableOnRestore]"))); + } + public void testRejectNonIndexScopedNotCopyableOnResizeSetting() { final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, From 757da1a9e0563b8ad0cbcbfe349ac4c3ff75498a Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 24 Jan 2025 11:18:44 -0800 Subject: [PATCH 09/48] Fix Binlong's name in MAINTAINERS.md (#17113) Signed-off-by: Andrew Ross --- MAINTAINERS.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 93821a3da4c71..68d8543ee2725 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -16,7 +16,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje | Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon | | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon | | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon | -| Gao Binlong | [gaobinlong](https://github.com/gaobinlong) | Amazon | +| Binlong Gao | [gaobinlong](https://github.com/gaobinlong) | Amazon | | Gaurav Bafna | [gbbafna](https://github.com/gbbafna) | Amazon | | Jay Deng | [jed326](https://github.com/jed326) | Amazon | | Kunal Kotwani | [kotwanikunal](https://github.com/kotwanikunal) | Amazon | From 0d7ac2c94c6190ae11584146fea61bd7fc4c4b64 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 24 Jan 2025 14:22:52 -0800 Subject: [PATCH 10/48] Move o.o.action.support.master classes (#17104) Move AcknowledgedResponse, AcknowledgedRequest, AcknowledgedRequestBuilder,and ShardsAcknowledgedResponse to "clustermanager" Java package. This is a purely structural move done via the IDE with no logic changes. 
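For downstream code the move is a one-line import swap per file; an illustrative call site (the `deleteIndex` helper and `client` variable are examples, not part of this patch):

    import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
    // was: import org.opensearch.action.support.master.AcknowledgedResponse;
    import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
    import org.opensearch.client.Client;

    final class DeleteIndexExample {
        // Only the response type's package changes; the API surface is identical.
        static boolean deleteIndex(Client client, String index) {
            AcknowledgedResponse response = client.admin().indices().delete(new DeleteIndexRequest(index)).actionGet();
            return response.isAcknowledged();
        }
    }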
Signed-off-by: Andrew Ross --- CHANGELOG-3.0.md | 1 + .../main/java/org/opensearch/client/ClusterClient.java | 2 +- .../main/java/org/opensearch/client/IndicesClient.java | 2 +- .../src/main/java/org/opensearch/client/IngestClient.java | 2 +- .../java/org/opensearch/client/RestHighLevelClient.java | 2 +- .../java/org/opensearch/client/SearchPipelineClient.java | 2 +- .../main/java/org/opensearch/client/SnapshotClient.java | 2 +- .../org/opensearch/client/indices/CloseIndexResponse.java | 2 +- .../opensearch/client/indices/CreateIndexResponse.java | 2 +- .../client/indices/rollover/RolloverResponse.java | 2 +- .../test/java/org/opensearch/client/ClusterClientIT.java | 2 +- .../opensearch/client/ClusterRequestConvertersTests.java | 2 +- .../test/java/org/opensearch/client/IndicesClientIT.java | 2 +- .../opensearch/client/IndicesRequestConvertersTests.java | 2 +- .../test/java/org/opensearch/client/IngestClientIT.java | 2 +- .../opensearch/client/IngestRequestConvertersTests.java | 2 +- .../org/opensearch/client/RequestConvertersTests.java | 2 +- .../org/opensearch/client/SearchPipelineClientIT.java | 2 +- .../src/test/java/org/opensearch/client/SnapshotIT.java | 2 +- .../opensearch/client/SnapshotRequestConvertersTests.java | 2 +- .../opensearch/client/core/AcknowledgedResponseTests.java | 8 ++++---- .../documentation/ClusterClientDocumentationIT.java | 2 +- .../documentation/IndicesClientDocumentationIT.java | 2 +- .../client/documentation/IngestClientDocumentationIT.java | 2 +- .../documentation/SnapshotClientDocumentationIT.java | 2 +- .../documentation/StoredScriptsDocumentationIT.java | 2 +- .../client/indices/CloseIndexResponseTests.java | 4 ++-- .../opensearch/repositories/url/URLSnapshotRestoreIT.java | 2 +- .../search/pipeline/common/SearchPipelineCommonIT.java | 2 +- .../org/opensearch/index/mapper/size/SizeMappingIT.java | 2 +- .../plugin/wlm/action/DeleteQueryGroupAction.java | 2 +- .../plugin/wlm/action/DeleteQueryGroupRequest.java | 2 +- .../wlm/action/TransportDeleteQueryGroupAction.java | 2 +- .../plugin/wlm/service/QueryGroupPersistenceService.java | 2 +- .../wlm/action/TransportDeleteQueryGroupActionTests.java | 2 +- .../plugin/wlm/rest/RestDeleteQueryGroupActionTests.java | 2 +- .../wlm/service/QueryGroupPersistenceServiceTests.java | 2 +- .../action/admin/indices/create/CreateIndexIT.java | 2 +- .../admin/indices/datastream/DataStreamTestCase.java | 2 +- .../org/opensearch/action/bulk/BulkIntegrationIT.java | 2 +- .../java/org/opensearch/aliases/IndexAliasesIT.java | 2 +- .../java/org/opensearch/blocks/SimpleBlocksIT.java | 2 +- .../cluster/coordination/RareClusterStateIT.java | 2 +- .../opensearch/cluster/shards/ClusterShardLimitIT.java | 2 +- .../org/opensearch/gateway/RecoveryFromGatewayIT.java | 2 +- .../org/opensearch/index/mapper/StarTreeMapperIT.java | 2 +- .../java/org/opensearch/index/seqno/RetentionLeaseIT.java | 2 +- .../indices/mapping/UpdateMappingIntegrationIT.java | 2 +- .../indices/state/CloseWhileRelocatingShardsIT.java | 2 +- .../org/opensearch/indices/state/OpenCloseIndexIT.java | 2 +- .../java/org/opensearch/ingest/IngestClientIT.java | 2 +- .../ingest/IngestProcessorNotInstalledOnAllNodesIT.java | 2 +- .../remotestore/RemoteStoreClusterStateRestoreIT.java | 2 +- .../search/suggest/CompletionSuggestSearchIT.java | 2 +- .../java/org/opensearch/snapshots/CloneSnapshotIT.java | 2 +- .../java/org/opensearch/snapshots/CloneSnapshotV2IT.java | 2 +- .../org/opensearch/snapshots/ConcurrentSnapshotsIT.java | 2 +- 
.../org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java | 2 +- .../snapshots/DedicatedClusterSnapshotRestoreIT.java | 2 +- .../java/org/opensearch/snapshots/DeleteSnapshotIT.java | 2 +- .../java/org/opensearch/snapshots/DeleteSnapshotV2IT.java | 2 +- .../java/org/opensearch/snapshots/RepositoriesIT.java | 2 +- .../org/opensearch/snapshots/SearchableSnapshotIT.java | 2 +- .../awareness/delete/DeleteDecommissionStateResponse.java | 2 +- .../decommission/awareness/put/DecommissionResponse.java | 2 +- .../repositories/cleanup/CleanupRepositoryRequest.java | 2 +- .../repositories/delete/DeleteRepositoryAction.java | 2 +- .../repositories/delete/DeleteRepositoryRequest.java | 2 +- .../delete/DeleteRepositoryRequestBuilder.java | 4 ++-- .../delete/TransportDeleteRepositoryAction.java | 2 +- .../cluster/repositories/put/PutRepositoryAction.java | 2 +- .../cluster/repositories/put/PutRepositoryRequest.java | 2 +- .../repositories/put/PutRepositoryRequestBuilder.java | 4 ++-- .../repositories/put/TransportPutRepositoryAction.java | 2 +- .../repositories/verify/VerifyRepositoryRequest.java | 2 +- .../admin/cluster/reroute/ClusterRerouteRequest.java | 2 +- .../cluster/reroute/ClusterRerouteRequestBuilder.java | 2 +- .../admin/cluster/reroute/ClusterRerouteResponse.java | 2 +- .../cluster/settings/ClusterUpdateSettingsRequest.java | 2 +- .../settings/ClusterUpdateSettingsRequestBuilder.java | 2 +- .../cluster/settings/ClusterUpdateSettingsResponse.java | 2 +- .../delete/ClusterDeleteWeightedRoutingResponse.java | 2 +- .../weighted/put/ClusterPutWeightedRoutingResponse.java | 2 +- .../cluster/snapshots/clone/CloneSnapshotAction.java | 2 +- .../snapshots/clone/CloneSnapshotRequestBuilder.java | 2 +- .../snapshots/clone/TransportCloneSnapshotAction.java | 2 +- .../cluster/snapshots/delete/DeleteSnapshotAction.java | 2 +- .../snapshots/delete/DeleteSnapshotRequestBuilder.java | 2 +- .../snapshots/delete/TransportDeleteSnapshotAction.java | 2 +- .../cluster/storedscripts/DeleteStoredScriptAction.java | 2 +- .../cluster/storedscripts/DeleteStoredScriptRequest.java | 2 +- .../storedscripts/DeleteStoredScriptRequestBuilder.java | 4 ++-- .../cluster/storedscripts/PutStoredScriptAction.java | 2 +- .../cluster/storedscripts/PutStoredScriptRequest.java | 2 +- .../storedscripts/PutStoredScriptRequestBuilder.java | 4 ++-- .../storedscripts/TransportDeleteStoredScriptAction.java | 2 +- .../storedscripts/TransportPutStoredScriptAction.java | 2 +- .../action/admin/indices/alias/IndicesAliasesAction.java | 2 +- .../action/admin/indices/alias/IndicesAliasesRequest.java | 2 +- .../admin/indices/alias/IndicesAliasesRequestBuilder.java | 4 ++-- .../indices/alias/TransportIndicesAliasesAction.java | 2 +- .../action/admin/indices/close/CloseIndexRequest.java | 2 +- .../admin/indices/close/CloseIndexRequestBuilder.java | 2 +- .../action/admin/indices/close/CloseIndexResponse.java | 2 +- .../action/admin/indices/create/CreateIndexRequest.java | 2 +- .../admin/indices/create/CreateIndexRequestBuilder.java | 2 +- .../action/admin/indices/create/CreateIndexResponse.java | 2 +- .../dangling/delete/DeleteDanglingIndexAction.java | 2 +- .../dangling/delete/DeleteDanglingIndexRequest.java | 2 +- .../delete/TransportDeleteDanglingIndexAction.java | 2 +- .../dangling/import_index/ImportDanglingIndexAction.java | 2 +- .../dangling/import_index/ImportDanglingIndexRequest.java | 2 +- .../import_index/TransportImportDanglingIndexAction.java | 2 +- .../admin/indices/datastream/CreateDataStreamAction.java | 4 ++-- 
 .../admin/indices/datastream/DeleteDataStreamAction.java | 2 +-
 .../action/admin/indices/delete/DeleteIndexAction.java | 2 +-
 .../action/admin/indices/delete/DeleteIndexRequest.java | 2 +-
 .../admin/indices/delete/DeleteIndexRequestBuilder.java | 4 ++--
 .../admin/indices/delete/TransportDeleteIndexAction.java | 2 +-
 .../admin/indices/mapping/put/AutoPutMappingAction.java | 2 +-
 .../admin/indices/mapping/put/PutMappingAction.java | 2 +-
 .../admin/indices/mapping/put/PutMappingRequest.java | 4 ++--
 .../indices/mapping/put/PutMappingRequestBuilder.java | 4 ++--
 .../mapping/put/TransportAutoPutMappingAction.java | 2 +-
 .../indices/mapping/put/TransportPutMappingAction.java | 2 +-
 .../action/admin/indices/open/OpenIndexRequest.java | 2 +-
 .../admin/indices/open/OpenIndexRequestBuilder.java | 2 +-
 .../action/admin/indices/open/OpenIndexResponse.java | 2 +-
 .../admin/indices/readonly/AddIndexBlockRequest.java | 2 +-
 .../indices/readonly/AddIndexBlockRequestBuilder.java | 2 +-
 .../admin/indices/readonly/AddIndexBlockResponse.java | 2 +-
 .../action/admin/indices/rollover/RolloverRequest.java | 2 +-
 .../action/admin/indices/rollover/RolloverResponse.java | 2 +-
 .../settings/put/TransportUpdateSettingsAction.java | 2 +-
 .../admin/indices/settings/put/UpdateSettingsAction.java | 2 +-
 .../admin/indices/settings/put/UpdateSettingsRequest.java | 2 +-
 .../settings/put/UpdateSettingsRequestBuilder.java | 4 ++--
 .../action/admin/indices/shrink/ResizeRequest.java | 2 +-
 .../action/admin/indices/shrink/ResizeRequestBuilder.java | 2 +-
 .../template/delete/DeleteComponentTemplateAction.java | 2 +-
 .../delete/DeleteComposableIndexTemplateAction.java | 2 +-
 .../template/delete/DeleteIndexTemplateAction.java | 2 +-
 .../delete/DeleteIndexTemplateRequestBuilder.java | 2 +-
 .../delete/TransportDeleteComponentTemplateAction.java | 2 +-
 .../TransportDeleteComposableIndexTemplateAction.java | 2 +-
 .../delete/TransportDeleteIndexTemplateAction.java | 2 +-
 .../indices/template/put/PutComponentTemplateAction.java | 2 +-
 .../template/put/PutComposableIndexTemplateAction.java | 2 +-
 .../indices/template/put/PutIndexTemplateAction.java | 2 +-
 .../template/put/PutIndexTemplateRequestBuilder.java | 2 +-
 .../template/put/TransportPutComponentTemplateAction.java | 2 +-
 .../put/TransportPutComposableIndexTemplateAction.java | 2 +-
 .../template/put/TransportPutIndexTemplateAction.java | 2 +-
 .../admin/indices/tiering/HotToWarmTieringResponse.java | 2 +-
 .../action/admin/indices/tiering/TieringIndexRequest.java | 2 +-
 .../upgrade/post/TransportUpgradeSettingsAction.java | 2 +-
 .../admin/indices/upgrade/post/UpgradeSettingsAction.java | 2 +-
 .../indices/upgrade/post/UpgradeSettingsRequest.java | 2 +-
 .../action/admin/indices/view/DeleteViewAction.java | 2 +-
 .../opensearch/action/admin/indices/view/ViewService.java | 2 +-
 .../opensearch/action/ingest/DeletePipelineAction.java | 2 +-
 .../opensearch/action/ingest/DeletePipelineRequest.java | 2 +-
 .../action/ingest/DeletePipelineRequestBuilder.java | 2 +-
 .../action/ingest/DeletePipelineTransportAction.java | 2 +-
 .../org/opensearch/action/ingest/PutPipelineAction.java | 2 +-
 .../org/opensearch/action/ingest/PutPipelineRequest.java | 2 +-
 .../action/ingest/PutPipelineRequestBuilder.java | 2 +-
 .../action/ingest/PutPipelineTransportAction.java | 2 +-
 .../action/search/DeleteSearchPipelineAction.java | 2 +-
 .../action/search/DeleteSearchPipelineRequest.java | 2 +-
 .../search/DeleteSearchPipelineTransportAction.java | 2 +-
 .../opensearch/action/search/PutSearchPipelineAction.java | 2 +-
 .../action/search/PutSearchPipelineRequest.java | 2 +-
 .../action/search/PutSearchPipelineTransportAction.java | 2 +-
 .../{master => clustermanager}/AcknowledgedRequest.java | 3 ++-
 .../AcknowledgedRequestBuilder.java | 3 ++-
 .../{master => clustermanager}/AcknowledgedResponse.java | 2 +-
 .../ShardsAcknowledgedResponse.java | 2 +-
 .../java/org/opensearch/client/ClusterAdminClient.java | 2 +-
 .../java/org/opensearch/client/IndicesAdminClient.java | 2 +-
 .../org/opensearch/client/support/AbstractClient.java | 2 +-
 .../cluster/metadata/MetadataCreateDataStreamService.java | 2 +-
 .../cluster/metadata/MetadataIndexTemplateService.java | 2 +-
 .../cluster/metadata/TemplateUpgradeService.java | 2 +-
 .../main/java/org/opensearch/ingest/IngestService.java | 2 +-
 .../cluster/dangling/RestDeleteDanglingIndexAction.java | 2 +-
 .../cluster/dangling/RestImportDanglingIndexAction.java | 2 +-
 .../rest/action/admin/indices/RestOpenIndexAction.java | 2 +-
 .../rest/action/admin/indices/RestResizeHandler.java | 2 +-
 .../main/java/org/opensearch/script/ScriptService.java | 2 +-
 .../opensearch/search/pipeline/SearchPipelineService.java | 2 +-
 .../admin/cluster/reroute/ClusterRerouteRequestTests.java | 2 +-
 .../clustermanager/ShardsAcknowledgedResponseTests.java | 1 -
 .../metadata/MetadataIndexTemplateServiceTests.java | 2 +-
 .../cluster/metadata/TemplateUpgradeServiceTests.java | 2 +-
 .../blobstore/BlobStoreRepositoryRemoteIndexTests.java | 2 +-
 .../opensearch/snapshots/RestoreServiceIntegTests.java | 2 +-
 .../org/opensearch/snapshots/SnapshotResiliencyTests.java | 2 +-
 .../snapshots/AbstractSnapshotIntegTestCase.java | 2 +-
 .../java/org/opensearch/test/OpenSearchIntegTestCase.java | 2 +-
 .../src/main/java/org/opensearch/test/TestCluster.java | 2 +-
 .../opensearch/test/hamcrest/OpenSearchAssertions.java | 4 ++--
 202 files changed, 218 insertions(+), 216 deletions(-)
 rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedRequest.java (96%)
 rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedRequestBuilder.java (94%)
 rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedResponse.java (98%)
 rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/ShardsAcknowledgedResponse.java (98%)

diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md
index 6ca1543d85c7d..5d16c117d98fa 100644
--- a/CHANGELOG-3.0.md
+++ b/CHANGELOG-3.0.md
@@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021))
 - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871))
 - Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392))
+- Move o.o.action.support.master classes ([#17104](https://github.com/opensearch-project/OpenSearch/pull/17104))

 ### Fixed
 - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827))
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java
index eb0a8b0e8f40a..0344695ad3848 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.cluster.RemoteInfoRequest;
 import org.opensearch.client.cluster.RemoteInfoResponse;
 import org.opensearch.client.indices.ComponentTemplatesExistRequest;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java
index 281f020533d51..efb65bf6865d4 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java
@@ -51,7 +51,7 @@
 import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest;
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.indices.AnalyzeRequest;
 import org.opensearch.client.indices.AnalyzeResponse;
 import org.opensearch.client.indices.CloseIndexRequest;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java
index 29e5c5369f184..55d481ebd07f3 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.ingest.PutPipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.core.action.ActionListener;

 import java.io.IOException;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
index 83c3ba8164c4b..f0764989da78e 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java
@@ -68,7 +68,7 @@
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.search.SearchScrollRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.action.update.UpdateResponse;
 import org.opensearch.client.core.CountRequest;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java
index 0014bdb8c8182..17a5e5701ad24 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/SearchPipelineClient.java
@@ -12,7 +12,7 @@
 import org.opensearch.action.search.GetSearchPipelineRequest;
 import org.opensearch.action.search.GetSearchPipelineResponse;
 import org.opensearch.action.search.PutSearchPipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.core.action.ActionListener;

 import java.io.IOException;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java
index 87a0e45eafe49..54d09ed2baf11 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java
@@ -50,7 +50,7 @@
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.core.action.ActionListener;

 import java.io.IOException;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java
index e32c33484140d..38ce53bcc0dfa 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.client.indices;

 import org.opensearch.OpenSearchException;
-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.common.Nullable;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java
index 21e2ba4b342a6..24f752c2bbf83 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.client.indices;

-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.ObjectParser;
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java
index b146d4f94e131..5d56cba9e8db5 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.client.indices.rollover;

-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.xcontent.ConstructingObjectParser;
 import org.opensearch.core.xcontent.XContentParser;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java
index 79481fd03b2a1..137c053786349 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java
@@ -42,7 +42,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.cluster.RemoteConnectionInfo;
 import org.opensearch.client.cluster.RemoteInfoRequest;
 import org.opensearch.client.cluster.RemoteInfoResponse;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java
index 3415868c9f8c6..24d27dcb7f6ad 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.client.cluster.RemoteInfoRequest;
 import org.opensearch.cluster.health.ClusterHealthStatus;
 import org.opensearch.common.Priority;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
index ccdcc21f0fc8b..0399e4667d85d 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java
@@ -65,7 +65,7 @@
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.WriteRequest;
 import org.opensearch.action.support.broadcast.BroadcastResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.indices.AnalyzeRequest;
 import org.opensearch.client.indices.AnalyzeResponse;
 import org.opensearch.client.indices.CloseIndexRequest;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
index c3a0f049f375e..a42c00ab2c2c5 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java
@@ -53,7 +53,7 @@
 import org.opensearch.action.admin.indices.shrink.ResizeType;
 import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.client.indices.AnalyzeRequest;
 import org.opensearch.client.indices.CloseIndexRequest;
 import org.opensearch.client.indices.CreateDataStreamRequest;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java
index 33bff06a83065..5e1510aab0784 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java
@@ -41,7 +41,7 @@
 import org.opensearch.action.ingest.SimulateDocumentVerboseResult;
 import org.opensearch.action.ingest.SimulatePipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.XContentBuilder;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
index 38dde4be3dd8e..6cadb2d8b72d5 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java
@@ -40,7 +40,7 @@
 import org.opensearch.action.ingest.GetPipelineRequest;
 import org.opensearch.action.ingest.PutPipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.test.OpenSearchTestCase;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
index 38f5d9302440e..a35213c1c3c0c 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java
@@ -62,8 +62,8 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.WriteRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.action.support.replication.ReplicationRequest;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.client.RequestConverters.EndpointBuilder;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
index 9304be7f21899..7b94b19549633 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchPipelineClientIT.java
@@ -12,7 +12,7 @@
 import org.opensearch.action.search.GetSearchPipelineRequest;
 import org.opensearch.action.search.GetSearchPipelineResponse;
 import org.opensearch.action.search.PutSearchPipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.XContentBuilder;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
index 362a8f10d6a77..af32e0d76178d 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java
@@ -51,7 +51,7 @@
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.rest.RestStatus;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
index 8b6910ffebe4a..18b1c9382583e 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java
@@ -45,7 +45,7 @@
 import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.io.PathUtils;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeUnit;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
index e184df7ad013c..758685b24cf24 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java
@@ -40,12 +40,12 @@
 import static org.hamcrest.Matchers.is;

 public class AcknowledgedResponseTests extends AbstractResponseTestCase<
-    org.opensearch.action.support.master.AcknowledgedResponse,
+    org.opensearch.action.support.clustermanager.AcknowledgedResponse,
     AcknowledgedResponse> {

     @Override
-    protected org.opensearch.action.support.master.AcknowledgedResponse createServerTestInstance(XContentType xContentType) {
-        return new org.opensearch.action.support.master.AcknowledgedResponse(randomBoolean());
+    protected org.opensearch.action.support.clustermanager.AcknowledgedResponse createServerTestInstance(XContentType xContentType) {
+        return new org.opensearch.action.support.clustermanager.AcknowledgedResponse(randomBoolean());
     }

     @Override
@@ -55,7 +55,7 @@ protected AcknowledgedResponse doParseToClientInstance(XContentParser parser) th

     @Override
     protected void assertInstances(
-        org.opensearch.action.support.master.AcknowledgedResponse serverTestInstance,
+        org.opensearch.action.support.clustermanager.AcknowledgedResponse serverTestInstance,
         AcknowledgedResponse clientInstance
     ) {
         assertThat(clientInstance.isAcknowledged(), is(serverTestInstance.isAcknowledged()));
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
index 17ea5b273d2a2..2199824cb910b 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java
@@ -40,7 +40,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.RestHighLevelClient;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
index ce080b45273b4..84995caffa0fe 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java
@@ -61,7 +61,7 @@
 import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.GetAliasesResponse;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java
index 28909cf58541a..ddea2069f46e7 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java
@@ -43,7 +43,7 @@
 import org.opensearch.action.ingest.SimulatePipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineResponse;
 import org.opensearch.action.ingest.SimulateProcessorResult;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.RestHighLevelClient;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
index 6949bc382bfe8..b7ee638788f58 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java
@@ -51,7 +51,7 @@
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.Request;
 import org.opensearch.client.RequestOptions;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java
index 2e2d15df5392a..11becf8e55942 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java
@@ -37,7 +37,7 @@
 import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
 import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
 import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchRestHighLevelClientTestCase;
 import org.opensearch.client.RequestOptions;
 import org.opensearch.client.RestHighLevelClient;
diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java
index 6aafee142bd22..36e71bf9f0830 100644
--- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java
@@ -32,8 +32,8 @@
 package org.opensearch.client.indices;

 import org.opensearch.OpenSearchStatusException;
-import org.opensearch.action.support.master.AcknowledgedResponse;
-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.client.AbstractResponseTestCase;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
 import org.opensearch.common.xcontent.XContentType;
diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
index 4c71f80a30926..3027f1406f089 100644
--- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
+++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
@@ -35,7 +35,7 @@
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.unit.ByteSizeUnit;
diff --git a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java
index 7e938623975fc..baa0b77981195 100644
--- a/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java
+++ b/modules/search-pipeline-common/src/internalClusterTest/java/org/opensearch/search/pipeline/common/SearchPipelineCommonIT.java
@@ -19,7 +19,7 @@
 import org.opensearch.action.search.PutSearchPipelineRequest;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.core.common.bytes.BytesReference;
diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java
index 51e0979324623..7a90974ff1673 100644
--- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java
+++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java
@@ -33,7 +33,7 @@

 import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.opensearch.action.get.GetResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
 import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.plugin.mapper.MapperSizePlugin;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
index c78952a2f89ad..b638dbd61ca1a 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupAction.java
@@ -9,7 +9,7 @@
 package org.opensearch.plugin.wlm.action;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Transport action for delete QueryGroup
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
index e514943c2c7e9..e798c8e137062 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/DeleteQueryGroupRequest.java
@@ -9,7 +9,7 @@
 package org.opensearch.plugin.wlm.action;

 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
index e4d3908d4a208..dd37f9df399ce 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupAction.java
@@ -9,8 +9,8 @@
 package org.opensearch.plugin.wlm.action;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
index f9332ff3022dc..73dff306d0e69 100644
--- a/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
+++ b/plugins/workload-management/src/main/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceService.java
@@ -11,7 +11,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.ResourceNotFoundException;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java
index 253d65f8da80f..39d263bfdb150 100644
--- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/action/TransportDeleteQueryGroupActionTests.java
@@ -9,7 +9,7 @@
 package org.opensearch.plugin.wlm.action;

 import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
 import org.opensearch.cluster.service.ClusterService;
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java
index 72191e076bb87..28ed813ec4130 100644
--- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/rest/RestDeleteQueryGroupActionTests.java
@@ -8,7 +8,7 @@

 package org.opensearch.plugin.wlm.rest;

-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.common.CheckedConsumer;
 import org.opensearch.common.unit.TimeValue;
diff --git a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java
index 08b51fd46cfcf..67e47be1a55ce 100644
--- a/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java
+++ b/plugins/workload-management/src/test/java/org/opensearch/plugin/wlm/service/QueryGroupPersistenceServiceTests.java
@@ -9,7 +9,7 @@
 package org.opensearch.plugin.wlm.service;

 import org.opensearch.ResourceNotFoundException;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterName;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java
index c44215fc2f327..d713c9cc86841 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java
@@ -39,7 +39,7 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader;
 import org.opensearch.cluster.applicationtemplates.SystemTemplate;
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java
index 82ab5b0118c0e..c36f8e38d2cdd 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java
@@ -12,7 +12,7 @@
 import org.opensearch.action.admin.indices.rollover.RolloverResponse;
 import org.opensearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
 import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.DataStream;
 import org.opensearch.cluster.metadata.Template;
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java
index cf83f20244a4b..52c4deb11fe4a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java
@@ -40,7 +40,7 @@
 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.ingest.PutPipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.replication.ReplicationRequest;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java
index 292a13bdf7f5c..0503442728dee 100644
--- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java
@@ -41,7 +41,7 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.IndicesOptions;
 import org.opensearch.action.support.WriteRequest.RefreshPolicy;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.AliasMetadata;
 import org.opensearch.cluster.metadata.IndexAbstraction;
diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java
index 6275571cc2371..e1370390e74be 100644
--- a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java
@@ -41,7 +41,7 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java
index cc0264f375103..2906aa0e086fb 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java
@@ -37,7 +37,7 @@
 import org.opensearch.action.ActionRequest;
 import org.opensearch.action.ActionRequestBuilder;
 import org.opensearch.action.index.IndexResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.action.shard.ShardStateAction;
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
index 3718dce538053..5d9b08f3aa004 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
@@ -37,7 +37,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.metadata.IndexMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
index 8d0a3b5788a70..29d60a375fabc 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java
@@ -48,7 +48,7 @@
 import org.opensearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.opensearch.action.admin.indices.stats.ShardStats;
 import org.opensearch.action.support.ActionTestUtils;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Requests;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.coordination.ElectionSchedulerFactory;
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
index 007fefa1499ab..87577cf2e24cc 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java
@@ -11,7 +11,7 @@
 import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.opensearch.action.index.IndexResponse;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.Rounding;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java
index 6163edada9f6e..51a16acb68089 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java
@@ -33,7 +33,7 @@
 package org.opensearch.index.seqno;

 import org.opensearch.OpenSearchException;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.replication.ReplicationResponse;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.ShardRouting;
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java
index 2c5d408b3b8f6..a3b1b388eb482 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java
@@ -36,7 +36,7 @@
 import org.opensearch.action.admin.indices.refresh.RefreshResponse;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.action.index.MappingUpdatedAction;
 import org.opensearch.cluster.metadata.MappingMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java
index d4df608547a9e..bab1c01321ba9 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java
@@ -33,7 +33,7 @@

 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.routing.IndexRoutingTable;
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java
index 0bf561c606a2d..e697634355eca 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java
@@ -41,7 +41,7 @@
 import org.opensearch.action.search.SearchResponse;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
index 0eb37a7b25618..45f976493edc8 100644
--- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java
@@ -50,7 +50,7 @@
 import org.opensearch.action.ingest.SimulateDocumentBaseResult;
 import org.opensearch.action.ingest.SimulatePipelineRequest;
 import org.opensearch.action.ingest.SimulatePipelineResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.update.UpdateRequest;
 import org.opensearch.client.Requests;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
index 4c949e11459ab..7a789441c2b55 100644
--- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java
@@ -35,7 +35,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;

 import org.opensearch.OpenSearchParseException;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
index 6a5adf5ea4fb7..e2fada2e3496c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java
@@ -18,7 +18,7 @@
 import org.opensearch.action.admin.indices.template.put.PutComponentTemplateAction;
 import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction;
 import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.metadata.ComponentTemplate;
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java
index a54bc8b6e5fff..0ab86abea963b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java
@@ -44,7 +44,7 @@
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.search.SearchPhaseExecutionException;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.FieldMemoryStats;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.Fuzziness;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
index ddbe0520f0c1f..bfaefa1e91a8f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java
@@ -36,7 +36,7 @@
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus;
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.SnapshotsInProgress;
 import org.opensearch.cluster.metadata.RepositoryMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java
index b00a8c8d64ed9..e0e18f7aaad6f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java
@@ -40,7 +40,7 @@
 import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
 import org.opensearch.action.delete.DeleteResponse;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.RepositoriesMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java
index cab750a421f0d..252efcdc979bb 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java
@@ -39,7 +39,7 @@
 import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
 import org.opensearch.action.support.GroupedActionListener;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.SnapshotDeletionsInProgress;
 import org.opensearch.cluster.SnapshotsInProgress;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java
index ab5b22c69b517..375ba16d91bc1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java
@@ -11,7 +11,7 @@
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 686853c42aa03..18d23532c84e0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -42,7 +42,7 @@
 import org.opensearch.action.admin.indices.stats.ShardStats;
 import org.opensearch.action.index.IndexRequestBuilder;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
index 85d81761ea4a0..d09d087417d6a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java
@@ -9,7 +9,7 @@
 package org.opensearch.snapshots;

 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.UUIDs;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
index 7b2ad2bccd2b1..5e70299486825 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java
@@ -9,7 +9,7 @@
 package org.opensearch.snapshots;

 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java
index 271fcf166139f..35fd716c89e2b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
 import org.opensearch.action.bulk.BulkRequest;
 import org.opensearch.action.index.IndexRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.metadata.RepositoriesMetadata;
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
index a19bbe49ad340..a952aee7704dd 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java
@@ -22,7 +22,7 @@
 import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse;
 import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
 import org.opensearch.action.index.IndexRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java
index 13b056a019200..32de8080a53bc 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java
@@ -8,7 +8,7 @@

 package org.opensearch.action.admin.cluster.decommission.awareness.delete;

-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java
index a2401cdf91b07..6a9ed609c0c95 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/put/DecommissionResponse.java
@@ -8,7 +8,7 @@

 package org.opensearch.action.admin.cluster.decommission.awareness.put;

-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import
org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 3e408c6114690..27da4349af2df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 2031e4f7a716f..5f17afe2abf76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Unregister repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 04fdf22bee593..4dc686d5943aa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index 6f5d0495e1c9f..1f0ca5593c585 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.repositories.delete; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 3d779befe474e..edf4a39b6d0cf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index c2f90d869d873..9e56d1dfb3560 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Register repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 3fbd5743b5c8c..70d0cba0d823b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 22aa6d7dc7c00..29512261dbddf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -33,8 +33,8 @@ package 
org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.admin.cluster.crypto.CryptoSettings; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 1eadab6b1352e..eaaa041247b8a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index ae6c92d8625ca..0443360a3cfe2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index 1cefaa4866110..d4bb83cdd5f29 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.reroute; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index fc8310bdf7852..5ea56c8975b8a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index ff01888040e82..ff00aa21f5c62 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 77aee99c2e902..911c191b51c96 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 53f1f17bbeb50..11854a8ad931d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 
2dfdb49736773..dd21159329dea 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; import org.opensearch.core.ParseField; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java index f6a18ae5055ae..ddcfc2bedb97b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/delete/ClusterDeleteWeightedRoutingResponse.java @@ -8,7 +8,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.delete; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java index 4fee2f05a8715..2fc1c1b2dfd61 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingResponse.java @@ -8,7 +8,7 @@ package org.opensearch.action.admin.cluster.shards.routing.weighted.put; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java index c6fe102544a7e..189b6aa7b7544 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.clone; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for cloning a snapshot diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index 839a1b935ad1f..0585626020c1d 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.Strings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index 6da26d9a2da45..19071447a9556 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.snapshots.clone; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 0b98a4b31fd53..60d9cadc0aede 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Delete snapshot action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index f4da1ec0f7785..c2386a0039e88 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.snapshots.delete; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index e8462e4d822f2..b755d27639f80 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.snapshots.delete; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index 3645ef21d2e12..483004a3365c5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for deleting stored scripts diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index 0bb4f3625ddad..2400cfcc00592 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index cbadde386d5a9..a0c0bd62a3f45 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import 
org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index 2845d895a69e8..cc571c2f26136 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for putting stored script diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 8731b18fff338..60c4b32ada73e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -34,7 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index 46773177e9a74..f91bb98ed2f22 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.bytes.BytesReference; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index b0863939fd04c..9688b6659a810 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import 
org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index 61ee641b4764d..eb2838cbef5c8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java index 4d735e984c34e..9ce10c2853ff6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.alias; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for listing index aliases diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 6ce62dda32d0a..dd915f8857162 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.metadata.AliasAction; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.xcontent.XContentFactory; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index d262c9cd42ce9..a9ce37455ee36 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 42e02e9e1aff8..50f799e9f263f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.RequestValidators; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index e785c31c4a0b9..fd3f3799463c9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 92c32c9ace490..b44c05b0a2889 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 2e0c5cb5842b4..af21512081b30 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.close; import org.opensearch.OpenSearchException; -import 
org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.action.support.DefaultShardOperationFailedException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 6bb1bf0a3b97b..0c2e1954ad37e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -41,7 +41,7 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.metadata.Context; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 27a580434333a..cf65c295b5d81 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.Context; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index 3258ffd8672a1..7d7c5206dacc6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java index 6559ef4cd89bd..2ccc422f2edd6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionType; -import 
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * This action causes a dangling index to be considered as deleted by the cluster. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index c5c03c93785d2..4dcfb1f6b0fb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index 751a872ee7dcd..1e52eb95c71b0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -42,8 +42,8 @@ import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.opensearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java index 5f7a096b1d749..308720aa6139f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Represents a request to import a particular dangling index. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 2702b6a05c4bb..42a92c5450c7d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 3f47d1bf083f4..86acf78b8b2a0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -42,7 +42,7 @@ import org.opensearch.action.admin.indices.dangling.find.NodeFindDanglingIndexResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.inject.Inject; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 4c1690d25fbd9..6b5091e0b2ab5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -37,9 +37,9 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index 6b0aec6a31839..2212974985e3f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -38,9 +38,9 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java index 696c1244c7504..a3aa9e751a8ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for deleting an index diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index 5fbefbc6e1591..4bfee40b2d49f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 6cf0920f8570f..e02d46e5fd671 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 
410a58afc95f1..57f7dcfdf611d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -37,8 +37,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java index f2430eb54db9b..6f0cad2fe178d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to automatically put field mappings. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java index 8bca1b59ee2e2..9088d1241ad2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to put field mappings. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
index 8122db3278795..e33385c859ab7 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java
@@ -37,8 +37,8 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentFactory;
 import org.opensearch.common.xcontent.XContentHelper;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
index d44b243bb0edb..a32875151fc3a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.admin.indices.mapping.put;

 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.index.Index;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java
index 4722c1048014f..f50acb6e2d56e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java
@@ -32,8 +32,8 @@
 package org.opensearch.action.admin.indices.mapping.put;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
index ac797914aafd8..9727c6b62093e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java
@@ -37,8 +37,8 @@
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.RequestValidators;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ack.ClusterStateUpdateResponse;
 import org.opensearch.cluster.block.ClusterBlockException;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
index f48ec1ae6fb71..2975c8d0ab145 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java
@@ -36,7 +36,7 @@
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
index 19770255b0ee1..e31cba2f02a66 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java
@@ -34,7 +34,7 @@
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
index 78af1abc3ce31..217e4762b26a5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.action.admin.indices.open;

-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
index 1fb8514cbf48c..7d283de6d36a6 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java
@@ -35,7 +35,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
index ebcdf700d3b6e..14bfd7a74400f 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.admin.indices.readonly;

 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;
 import org.opensearch.common.annotation.PublicApi;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
index 3ab64fa55af8b..c6677268109c2 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.action.admin.indices.readonly;

 import org.opensearch.OpenSearchException;
-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.support.DefaultShardOperationFailedException;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
index 68c0076bbd302..54f124f39bd3a 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java
@@ -36,7 +36,7 @@
 import org.opensearch.action.admin.indices.create.CreateIndexRequest;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.ParseField;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
index b7df35cd480bb..dc5292c89fb9c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java
@@ -32,7 +32,7 @@
 package org.opensearch.action.admin.indices.rollover;

-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
index 779b136abef5c..c7457ca7cb137 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java
@@ -36,8 +36,8 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ack.ClusterStateUpdateResponse;
 import org.opensearch.cluster.block.ClusterBlockException;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java
index 2333a2aad6bc6..aa26acb7e3fc5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.admin.indices.settings.put;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Action for updating index settings
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
index 45172e313dfcc..a2282ef453189 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java
@@ -35,7 +35,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.Strings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
index 08d7a240aa007..9c8c5b3025aa2 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.admin.indices.settings.put;

 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
index a5225f2243876..1459cc124c3bc 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.indices.create.CreateIndexRequest;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
index f9d90d46b0904..d645784ab82b1 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java
@@ -34,7 +34,7 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.admin.indices.create.CreateIndexRequest;
 import org.opensearch.action.support.ActiveShardCount;
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java
index 1f427a349c2ea..a4478686dc87c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java
@@ -34,8 +34,8 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ActionType;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java
index 496358cdfd2b1..43d46f3f35526 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java
@@ -34,8 +34,8 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ActionType;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
index 789d03f8e8d8c..5773fcf93c49e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.admin.indices.template.delete;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Transport action for deleting an index template
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
index 60771cfa453ae..809624f66e417 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java
@@ -31,8 +31,8 @@
 package org.opensearch.action.admin.indices.template.delete;

+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java
index 30cb0cb3e5d00..d1fe08cb5926c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java
@@ -35,8 +35,8 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java
index 27ea64809e3a7..53098447112ac 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java
@@ -35,8 +35,8 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
index c9542c7a58810..a607166942d4c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java
@@ -35,8 +35,8 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java
index d12f99ec345d3..c759638e5dc4e 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java
@@ -34,8 +34,8 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.ActionType;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.ComponentTemplate;
 import org.opensearch.common.Nullable;
 import org.opensearch.core.common.Strings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
index ed209e18b64ef..abd0b1ad44561 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java
@@ -36,8 +36,8 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.ComposableIndexTemplate;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.common.Nullable;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java
index 06a9f6fbba409..eb21b81350fda 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.admin.indices.template.put;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * An action for putting an index template into the cluster state
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
index 931d12de574ae..3b423e42e25e5 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java
@@ -32,8 +32,8 @@
 package org.opensearch.action.admin.indices.template.put;

 import org.opensearch.action.admin.indices.alias.Alias;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.settings.Settings;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
index 6eb87bee9ffa7..66e2fe5c535db 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.admin.indices.template.put;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java
index 8a31c36d723b4..a5c3590a0a6d7 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.admin.indices.template.put;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
index 4431949c2e42b..b9f27c00d0d98 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java
@@ -35,8 +35,8 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java
index 275decf7a8ea5..3a74987cfff42 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/HotToWarmTieringResponse.java
@@ -8,7 +8,7 @@
 package org.opensearch.action.admin.indices.tiering;

-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java
index ed458a47ddb7d..df08b14d85953 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/tiering/TieringIndexRequest.java
@@ -11,7 +11,7 @@
 import org.opensearch.action.ActionRequestValidationException;
 import org.opensearch.action.IndicesRequest;
 import org.opensearch.action.support.IndicesOptions;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
index 286724d78bb63..7e5744bfc5674 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java
@@ -36,8 +36,8 @@
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ack.ClusterStateUpdateResponse;
 import org.opensearch.cluster.block.ClusterBlockException;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java
index 05944e781d109..4c42b4abbf678 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.admin.indices.upgrade.post;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Transport action for upgrading index settings
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java
index 306d29dd84f13..40cdfdaa811d9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java
@@ -34,7 +34,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java
index abb3c3f4db5f6..413e6700a758c 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/view/DeleteViewAction.java
@@ -12,9 +12,9 @@
 import org.opensearch.action.ActionType;
 import org.opensearch.action.ValidateActions;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java
index 294f88decba1f..a7b43b1f51df9 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/view/ViewService.java
@@ -12,7 +12,7 @@
 import org.apache.logging.log4j.Logger;
 import org.opensearch.action.search.SearchAction;
 import org.opensearch.action.search.SearchResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java
index 6017be9747912..82bb78a9b89d6 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Transport action to delete a pipeline
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
index b9d916e152c3d..8c851dbc86292 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
index bc253db85bb0f..12ae44c6c2956 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.ActionRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java
index fe68f06d0d32e..05db9927c906f 100644
--- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java
@@ -33,8 +33,8 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java
index 1fcbd783d246b..be47bff8f4e92 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Transport action to put a new pipeline
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
index 06e89b5f2908b..40585b26b5d92 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java
@@ -34,7 +34,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
index e8d6a4d332319..42ac84186eaa2 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java
@@ -33,7 +33,7 @@
 package org.opensearch.action.ingest;

 import org.opensearch.action.ActionRequestBuilder;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.bytes.BytesReference;
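The ingest pipeline actions above and the search pipeline actions below follow the same pattern. A sketch of registering an ingest pipeline against the relocated response type; the pipeline id, JSON body, and helper class are made up for illustration, while the putPipeline call itself is the existing client API touched by this patch:

    import org.opensearch.action.ingest.PutPipelineRequest;
    import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
    import org.opensearch.client.Client;
    import org.opensearch.common.xcontent.XContentType;
    import org.opensearch.core.action.ActionListener;
    import org.opensearch.core.common.bytes.BytesArray;

    public class PutPipelineExample {
        // Hypothetical helper: stores an ingest pipeline and checks that the update was acked.
        public static void putPipeline(Client client, String id, String pipelineJson) {
            PutPipelineRequest request = new PutPipelineRequest(id, new BytesArray(pipelineJson), XContentType.JSON);
            client.admin().cluster().putPipeline(
                request,
                ActionListener.wrap(
                    (AcknowledgedResponse response) -> {
                        if (response.isAcknowledged() == false) {
                            throw new IllegalStateException("pipeline [" + id + "] update was not acknowledged");
                        }
                    },
                    e -> { throw new RuntimeException(e); }
                )
            );
        }
    }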
diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java
index e2d206e8c4f6d..d8e82ece056b5 100644
--- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java
@@ -35,8 +35,8 @@
 import org.opensearch.action.admin.cluster.node.info.NodeInfo;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OriginSettingClient;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineAction.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineAction.java
index 65f8cf3de9506..372cee6323d70 100644
--- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineAction.java
+++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineAction.java
@@ -9,7 +9,7 @@
 package org.opensearch.action.search;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Action type to delete a search pipeline
diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
index 0cde81203063c..083bd9307ffb8 100644
--- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineRequest.java
@@ -9,7 +9,7 @@
 package org.opensearch.action.search;

 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java
index ac83a6bb6b765..652791d3105f6 100644
--- a/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/search/DeleteSearchPipelineTransportAction.java
@@ -9,8 +9,8 @@
 package org.opensearch.action.search;

 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.block.ClusterBlockException;
 import org.opensearch.cluster.block.ClusterBlockLevel;
diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineAction.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineAction.java
index 798c8211ee505..5fff11f1298c4 100644
--- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineAction.java
+++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineAction.java
@@ -9,7 +9,7 @@
 package org.opensearch.action.search;

 import org.opensearch.action.ActionType;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;

 /**
  * Action type to put a new search pipeline
diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java
index 15b4ea648af29..a5ef4e36df832 100644
--- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java
+++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java
@@ -10,7 +10,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.action.support.master.AcknowledgedRequest;
+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.common.xcontent.XContentType;
 import org.opensearch.core.common.bytes.BytesReference;
diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java
index 903b7dfce09c0..312c73698e27f 100644
--- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java
+++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineTransportAction.java
@@ -11,8 +11,8 @@
 import org.opensearch.action.admin.cluster.node.info.NodeInfo;
 import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest;
 import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.OriginSettingClient;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java
similarity index 96%
rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java
rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java
index 59f238a202788..4543c9392e62b 100644
--- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java
@@ -29,8 +29,9 @@
  * GitHub history for details.
  */

-package org.opensearch.action.support.master;
+package org.opensearch.action.support.clustermanager;

+import org.opensearch.action.support.master.MasterNodeRequest;
 import org.opensearch.cluster.ack.AckedRequest;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.io.stream.StreamInput;
diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java
similarity index 94%
rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java
rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java
index 7a0824c6d30ca..ea50774d17d19 100644
--- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java
@@ -29,9 +29,10 @@
  * GitHub history for details.
  */

-package org.opensearch.action.support.master;
+package org.opensearch.action.support.clustermanager;

 import org.opensearch.action.ActionType;
+import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder;
 import org.opensearch.client.OpenSearchClient;
 import org.opensearch.common.unit.TimeValue;
diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java
similarity index 98%
rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java
rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java
index 279ad401f7e56..d93a1133789fa 100644
--- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java
@@ -29,7 +29,7 @@
  * GitHub history for details.
  */

-package org.opensearch.action.support.master;
+package org.opensearch.action.support.clustermanager;

 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.ParseField;
diff --git a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java
similarity index 98%
rename from server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java
rename to server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java
index fd54e810528d3..7173d8462bbd5 100644
--- a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java
+++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java
@@ -30,7 +30,7 @@
  * GitHub history for details.
  */

-package org.opensearch.action.support.master;
+package org.opensearch.action.support.clustermanager;

 import org.opensearch.core.ParseField;
 import org.opensearch.core.common.io.stream.StreamInput;
diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
index 5ce4d442fbe0b..23cb267446d7d 100644
--- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
+++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java
@@ -157,7 +157,7 @@
 import org.opensearch.action.search.GetSearchPipelineRequest;
 import org.opensearch.action.search.GetSearchPipelineResponse;
 import org.opensearch.action.search.PutSearchPipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.common.action.ActionFuture;
 import org.opensearch.common.annotation.PublicApi;
 import org.opensearch.core.action.ActionListener;
diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java
index 588584cd8a280..e68e1faddb1a8 100644
--- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java
+++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java
@@ -128,7 +128,7 @@
 import org.opensearch.action.admin.indices.view.CreateViewAction;
 import org.opensearch.action.admin.indices.view.DeleteViewAction;
 import org.opensearch.action.admin.indices.view.GetViewAction;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.metadata.IndexMetadata.APIBlock;
 import org.opensearch.common.Nullable;
 import org.opensearch.common.action.ActionFuture;
diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
index f4683ab516cef..07747647535f8 100644
--- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java
+++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java
@@ -395,7 +395,7 @@
 import org.opensearch.action.search.SearchScrollRequest;
 import org.opensearch.action.search.SearchScrollRequestBuilder;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.termvectors.MultiTermVectorsAction;
 import org.opensearch.action.termvectors.MultiTermVectorsRequest;
 import org.opensearch.action.termvectors.MultiTermVectorsRequestBuilder;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java
index 22ad21f54e556..1cf68d8b29a6f 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
 import org.opensearch.action.support.ActiveShardCount;
 import org.opensearch.action.support.ActiveShardsObserver;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ack.ClusterStateUpdateRequest;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
index e4afc798cc64d..3af18470df787 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java
@@ -38,8 +38,8 @@
 import org.apache.lucene.util.automaton.Operations;
 import org.opensearch.Version;
 import org.opensearch.action.admin.indices.alias.Alias;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.cluster.ClusterStateUpdateTask;
 import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader;
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java
index 10f458561bffe..001c30e14ebbe 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java
@@ -38,7 +38,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.ClusterChangedEvent;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java
index 0315a960dae92..4d65509345614 100644
--- a/server/src/main/java/org/opensearch/ingest/IngestService.java
+++ b/server/src/main/java/org/opensearch/ingest/IngestService.java
@@ -44,7 +44,7 @@
 import org.opensearch.action.index.IndexRequest;
 import org.opensearch.action.ingest.DeletePipelineRequest;
 import org.opensearch.action.ingest.PutPipelineRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterChangedEvent;
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java
index 21003f565be44..ed4eec9dab12c 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.rest.action.admin.cluster.dangling;

 import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.core.rest.RestStatus;
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java
index ea1e25717b9c0..ea6125f5a80fe 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java
@@ -33,7 +33,7 @@
 package org.opensearch.rest.action.admin.cluster.dangling;

 import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.node.NodeClient;
 import org.opensearch.common.logging.DeprecationLogger;
 import org.opensearch.core.rest.RestStatus;
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java
index e246efaa9af01..8ccd5f8e9450b 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestOpenIndexAction.java
@@ -50,7 +50,7 @@
 import static java.util.Arrays.asList;
 import static java.util.Collections.unmodifiableList;
-import static org.opensearch.action.support.master.AcknowledgedRequest.DEFAULT_TASK_EXECUTION_TIMEOUT;
+import static org.opensearch.action.support.clustermanager.AcknowledgedRequest.DEFAULT_TASK_EXECUTION_TIMEOUT;
 import static org.opensearch.rest.RestRequest.Method.POST;

 /**
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java
index 66f1e98228ba3..412693d0b99c1 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/indices/RestResizeHandler.java
@@ -53,7 +53,7 @@
 import static java.util.Arrays.asList;
 import static java.util.Collections.unmodifiableList;
-import static org.opensearch.action.support.master.AcknowledgedRequest.DEFAULT_TASK_EXECUTION_TIMEOUT;
+import static org.opensearch.action.support.clustermanager.AcknowledgedRequest.DEFAULT_TASK_EXECUTION_TIMEOUT;
 import static org.opensearch.rest.RestRequest.Method.POST;
 import static org.opensearch.rest.RestRequest.Method.PUT;
diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java b/server/src/main/java/org/opensearch/script/ScriptService.java
index d3c8861dbc5d7..5ff779948b2da 100644
--- a/server/src/main/java/org/opensearch/script/ScriptService.java
+++ b/server/src/main/java/org/opensearch/script/ScriptService.java
@@ -38,7 +38,7 @@
 import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
 import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
 import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterChangedEvent;
 import org.opensearch.cluster.ClusterState;
diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
index 27b837740c0ca..7c71771953da9 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
@@ -16,7 +16,7 @@
 import org.opensearch.action.search.DeleteSearchPipelineRequest;
 import org.opensearch.action.search.PutSearchPipelineRequest;
 import org.opensearch.action.search.SearchRequest;
-import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.AckedClusterStateUpdateTask;
 import org.opensearch.cluster.ClusterChangedEvent;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
index 129475e0fa3fb..5573cdc290207 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
@@ -32,8 +32,8 @@
 package org.opensearch.action.admin.cluster.reroute;

+import org.opensearch.action.support.clustermanager.AcknowledgedRequest;
 import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.action.support.master.AcknowledgedRequest;
 import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
 import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand;
diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java
index 68b7d97fa1427..1d839cc5f2c16 100644
--- a/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java
+++ b/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java
@@ -32,7 +32,6 @@
 package org.opensearch.action.support.clustermanager;

 import org.opensearch.Version;
-import org.opensearch.action.support.master.ShardsAcknowledgedResponse;
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java
index 99e259c8170f3..05ae67d10f4cb 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java
@@ -34,7 +34,7 @@
 import org.opensearch.Version;
 import org.opensearch.action.admin.indices.alias.Alias;
-import
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.applicationtemplates.ClusterStateSystemTemplateLoader; import org.opensearch.cluster.applicationtemplates.SystemTemplateMetadata; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index 562e293083633..ebce050c67539 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.IndicesAdminClient; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index f2b06b0926b81..03a5ba8599f77 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -34,7 +34,7 @@ import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.settings.Settings; diff --git a/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java index bfda27c97ca4c..f733154c643da 100644 --- a/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java +++ b/server/src/test/java/org/opensearch/snapshots/RestoreServiceIntegTests.java @@ -31,7 +31,7 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.settings.Settings; import org.opensearch.repositories.fs.FsRepository; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 697ec511b54f2..493a3ed431e00 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -106,9 +106,9 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.TransportAction; import 
org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.clustermanager.term.GetTermVersionAction; import org.opensearch.action.support.clustermanager.term.TransportGetTermVersionAction; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateHelper; import org.opensearch.client.AdminClient; import org.opensearch.client.node.NodeClient; diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 7db9875387500..d153e8d6aef53 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -38,7 +38,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index cb93622ab8728..e1eafb8570022 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -71,7 +71,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.ClusterAdminClient; diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 8c41e6e5d5b38..f5a439e2ffd02 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.datastream.DeleteDataStreamAction; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 3c145c2bd4e74..cd7669e52cdf8 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -49,8 +49,8 @@ import org.opensearch.action.search.SearchResponse; 
import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; From 77e91c2fe11b86ec682d1d937bec0f2d000392f1 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Fri, 24 Jan 2025 15:25:43 -0800 Subject: [PATCH 11/48] Stop processing search requests when _msearch is canceled (#17005) Prior to this fix, the _msearch API would keep running search requests even after being canceled. With this change, we explicitly check if the task has been canceled before kicking off subsequent requests. --------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../search/TransportMultiSearchAction.java | 24 ++++ .../TransportMultiSearchActionTests.java | 118 ++++++++++++++++++ 3 files changed, 143 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 17036473e054d..ec7742b8563bc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -111,6 +111,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) +- Stop processing search requests when _msearch request is cancelled ([#17005](https://github.com/opensearch-project/OpenSearch/pull/17005)) - Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) diff --git a/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java index 146b4010af4b3..dcb2ce6eb88da 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportMultiSearchAction.java @@ -44,6 +44,9 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -193,6 +196,19 @@ private void handleResponse(final int responseSlot, final MultiSearchResponse.It if (responseCounter.decrementAndGet() == 0) { assert requests.isEmpty(); finish(); + } else if (isCancelled(request.request.getParentTask())) { + // Drain the rest of the queue + SearchRequestSlot request; + while ((request = requests.poll()) 
!= null) { + responses.set( + request.responseSlot, + new MultiSearchResponse.Item(null, new TaskCancelledException("Parent task was cancelled")) + ); + if (responseCounter.decrementAndGet() == 0) { + assert requests.isEmpty(); + finish(); + } + } } else { if (thread == Thread.currentThread()) { // we are on the same thread, we need to fork to another thread to avoid recursive stack overflow on a single thread @@ -220,6 +236,14 @@ private long buildTookInMillis() { }); } + private boolean isCancelled(TaskId taskId) { + if (taskId.isSet()) { + CancellableTask task = taskManager.getCancellableTask(taskId.getId()); + return task != null && task.isCancelled(); + } + return false; + } + /** * Slots a search request * diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 48970e2b96add..45980e7137ce4 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -49,7 +49,9 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskListener; import org.opensearch.tasks.TaskManager; import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.OpenSearchTestCase; @@ -62,7 +64,9 @@ import java.util.IdentityHashMap; import java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -289,4 +293,118 @@ public void testDefaultMaxConcurrentSearches() { assertThat(result, equalTo(1)); } + public void testCancellation() { + // Initialize dependencies of TransportMultiSearchAction + Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + ThreadPool threadPool = new ThreadPool(settings); + TransportService transportService = new TransportService( + Settings.EMPTY, + mock(Transport.class), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(), UUIDs.randomBase64UUID()), + null, + Collections.emptySet(), + NoopTracer.INSTANCE + ) { + @Override + public TaskManager getTaskManager() { + return taskManager; + } + }; + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.state()).thenReturn(ClusterState.builder(new ClusterName("test")).build()); + + // Keep track of the number of concurrent searches started by multi search api, + // and if there are more searches than is allowed create an error and remember that. + int maxAllowedConcurrentSearches = 1; // Allow 1 search at a time. 
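+ // Each mock search below first asserts that the parent task has not
+ // already been cancelled, then briefly parks on the latch so the main
+ // thread can cancel the task before any follow-up request is dispatched.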
+ AtomicInteger counter = new AtomicInteger(); + AtomicReference errorHolder = new AtomicReference<>(); + // randomize whether or not requests are executed asynchronously + ExecutorService executorService = threadPool.executor(ThreadPool.Names.GENERIC); + final Set requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>())); + CountDownLatch countDownLatch = new CountDownLatch(1); + CancellableTask[] parentTask = new CancellableTask[1]; + NodeClient client = new NodeClient(settings, threadPool) { + @Override + public void search(final SearchRequest request, final ActionListener listener) { + if (parentTask[0] != null && parentTask[0].isCancelled()) { + fail("Should not execute search after parent task is cancelled"); + } + try { + countDownLatch.await(10, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + requests.add(request); + executorService.execute(() -> { + counter.decrementAndGet(); + listener.onResponse( + new SearchResponse( + InternalSearchResponse.empty(), + null, + 0, + 0, + 0, + 0L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); + }); + } + + @Override + public String getLocalNodeId() { + return "local_node_id"; + } + }; + + TransportMultiSearchAction action = new TransportMultiSearchAction( + threadPool, + actionFilters, + transportService, + clusterService, + 10, + System::nanoTime, + client + ); + + // Execute the multi search api and fail if we find an error after executing: + try { + /* + * Allow for a large number of search requests in a single batch as previous implementations could stack overflow if the number + * of requests in a single batch was large + */ + int numSearchRequests = scaledRandomIntBetween(1024, 8192); + MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); + multiSearchRequest.maxConcurrentSearchRequests(maxAllowedConcurrentSearches); + for (int i = 0; i < numSearchRequests; i++) { + multiSearchRequest.add(new SearchRequest()); + } + MultiSearchResponse[] responses = new MultiSearchResponse[1]; + Exception[] exceptions = new Exception[1]; + parentTask[0] = (CancellableTask) action.execute(multiSearchRequest, new TaskListener<>() { + @Override + public void onResponse(Task task, MultiSearchResponse items) { + responses[0] = items; + } + + @Override + public void onFailure(Task task, Exception e) { + exceptions[0] = e; + } + }); + parentTask[0].cancel("Giving up"); + countDownLatch.countDown(); + + assertNull(responses[0]); + assertNull(exceptions[0]); + } finally { + assertTrue(OpenSearchTestCase.terminate(threadPool)); + } + } } From 46f852a5f5b984623f2a6d377d4bfed912c4fee6 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Sat, 25 Jan 2025 09:51:40 -0800 Subject: [PATCH 12/48] Refactor bootstrap for JPMS support (#17117) Signed-off-by: Prudhvi Godithi --- CHANGELOG-3.0.md | 1 + .../opensearch/gradle/precommit/JarHellTask.java | 2 +- .../gradle/precommit/ThirdPartyAuditTask.java | 2 +- .../opensearch/gradle/test/TestClasspathUtils.java | 2 +- .../opensearch/plugins/InstallPluginCommand.java | 2 +- .../opensearch/{ => common}/bootstrap/JarHell.java | 2 +- .../{ => common}/bootstrap/JdkJarHellCheck.java | 2 +- .../{ => common}/bootstrap/package-info.java | 2 +- .../{ => common}/bootstrap/JarHellTests.java | 2 +- .../{ => common}/bootstrap/duplicate-classes.jar | Bin .../bootstrap/duplicate-xmlbeans-classes.jar | Bin .../org/opensearch/ingest/attachment/TikaImpl.java | 2 +- .../java/org/opensearch/bootstrap/Bootstrap.java | 1 + 
.../java/org/opensearch/bootstrap/Security.java | 1 + .../java/org/opensearch/plugins/PluginInfo.java | 2 +- .../java/org/opensearch/plugins/PluginsService.java | 2 +- .../resources/org/opensearch/bootstrap/test.policy | 0 .../org/opensearch/plugins/PluginsServiceTests.java | 2 +- .../opensearch/bootstrap/BootstrapForTesting.java | 1 + 19 files changed, 16 insertions(+), 12 deletions(-) rename libs/common/src/main/java/org/opensearch/{ => common}/bootstrap/JarHell.java (99%) rename libs/common/src/main/java/org/opensearch/{ => common}/bootstrap/JdkJarHellCheck.java (98%) rename libs/common/src/main/java/org/opensearch/{ => common}/bootstrap/package-info.java (85%) rename libs/common/src/test/java/org/opensearch/{ => common}/bootstrap/JarHellTests.java (99%) rename libs/common/src/test/resources/org/opensearch/{ => common}/bootstrap/duplicate-classes.jar (100%) rename libs/common/src/test/resources/org/opensearch/{ => common}/bootstrap/duplicate-xmlbeans-classes.jar (100%) rename {libs/common/src/test => server/src/main}/resources/org/opensearch/bootstrap/test.policy (100%) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 5d16c117d98fa..727684b9542b4 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -27,6 +27,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) - Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) +- Refactor `:libs` module `bootstrap` package to eliminate top level split packages [#17117](https://github.com/opensearch-project/OpenSearch/pull/17117)) ### Deprecated diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java index ebe0b25a3a685..47ad8cc524a3b 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java @@ -63,7 +63,7 @@ public JarHellTask(Project project) { public void runJarHellCheck() { LoggedExec.javaexec(project, spec -> { spec.environment("CLASSPATH", getClasspath().getAsPath()); - spec.getMainClass().set("org.opensearch.bootstrap.JarHell"); + spec.getMainClass().set("org.opensearch.common.bootstrap.JarHell"); }); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index 2ed801b7fb9c6..70a1ed478ff63 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -94,7 +94,7 @@ public class ThirdPartyAuditTask extends DefaultTask { CliMain.EXIT_VIOLATION, CliMain.EXIT_UNSUPPORTED_JDK ); - private static final String JDK_JAR_HELL_MAIN_CLASS = "org.opensearch.bootstrap.JdkJarHellCheck"; + private static final String JDK_JAR_HELL_MAIN_CLASS = "org.opensearch.common.bootstrap.JdkJarHellCheck"; private Set missingClassExcludes = new TreeSet<>(); diff --git a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/TestClasspathUtils.java 
b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/TestClasspathUtils.java index ec9a5fb157ccc..84362966d7300 100644 --- a/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/TestClasspathUtils.java +++ b/buildSrc/src/testFixtures/java/org/opensearch/gradle/test/TestClasspathUtils.java @@ -48,7 +48,7 @@ public class TestClasspathUtils { public static void setupJarJdkClasspath(File projectRoot) { try { URL originLocation = TestClasspathUtils.class.getClassLoader() - .loadClass("org.opensearch.bootstrap.JdkJarHellCheck") + .loadClass("org.opensearch.common.bootstrap.JdkJarHellCheck") .getProtectionDomain() .getCodeSource() .getLocation(); diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java index 511d6974085aa..d5a0102ba86af 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java @@ -52,12 +52,12 @@ import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider; import org.opensearch.Build; import org.opensearch.Version; -import org.opensearch.bootstrap.JarHell; import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.collect.Tuple; import org.opensearch.common.hash.MessageDigests; import org.opensearch.common.util.io.IOUtils; diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java b/libs/common/src/main/java/org/opensearch/common/bootstrap/JarHell.java similarity index 99% rename from libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java rename to libs/common/src/main/java/org/opensearch/common/bootstrap/JarHell.java index fc5e364241d12..470b92aaa2fab 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JarHell.java +++ b/libs/common/src/main/java/org/opensearch/common/bootstrap/JarHell.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.bootstrap; +package org.opensearch.common.bootstrap; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/JdkJarHellCheck.java b/libs/common/src/main/java/org/opensearch/common/bootstrap/JdkJarHellCheck.java similarity index 98% rename from libs/common/src/main/java/org/opensearch/bootstrap/JdkJarHellCheck.java rename to libs/common/src/main/java/org/opensearch/common/bootstrap/JdkJarHellCheck.java index 97b323975db0a..2a25f32b363c6 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/JdkJarHellCheck.java +++ b/libs/common/src/main/java/org/opensearch/common/bootstrap/JdkJarHellCheck.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.bootstrap; +package org.opensearch.common.bootstrap; import org.opensearch.common.SuppressForbidden; diff --git a/libs/common/src/main/java/org/opensearch/bootstrap/package-info.java b/libs/common/src/main/java/org/opensearch/common/bootstrap/package-info.java similarity index 85% rename from libs/common/src/main/java/org/opensearch/bootstrap/package-info.java rename to libs/common/src/main/java/org/opensearch/common/bootstrap/package-info.java index f522b1bb91444..8d05b614b7f38 100644 --- a/libs/common/src/main/java/org/opensearch/bootstrap/package-info.java +++ b/libs/common/src/main/java/org/opensearch/common/bootstrap/package-info.java @@ -7,4 +7,4 @@ */ /** Contains JarHell Classes */ -package org.opensearch.bootstrap; +package org.opensearch.common.bootstrap; diff --git a/libs/common/src/test/java/org/opensearch/bootstrap/JarHellTests.java b/libs/common/src/test/java/org/opensearch/common/bootstrap/JarHellTests.java similarity index 99% rename from libs/common/src/test/java/org/opensearch/bootstrap/JarHellTests.java rename to libs/common/src/test/java/org/opensearch/common/bootstrap/JarHellTests.java index d1851850e78e1..549c4bd652e2f 100644 --- a/libs/common/src/test/java/org/opensearch/bootstrap/JarHellTests.java +++ b/libs/common/src/test/java/org/opensearch/common/bootstrap/JarHellTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.bootstrap; +package org.opensearch.common.bootstrap; import org.opensearch.common.io.PathUtils; import org.opensearch.core.common.Strings; diff --git a/libs/common/src/test/resources/org/opensearch/bootstrap/duplicate-classes.jar b/libs/common/src/test/resources/org/opensearch/common/bootstrap/duplicate-classes.jar similarity index 100% rename from libs/common/src/test/resources/org/opensearch/bootstrap/duplicate-classes.jar rename to libs/common/src/test/resources/org/opensearch/common/bootstrap/duplicate-classes.jar diff --git a/libs/common/src/test/resources/org/opensearch/bootstrap/duplicate-xmlbeans-classes.jar b/libs/common/src/test/resources/org/opensearch/common/bootstrap/duplicate-xmlbeans-classes.jar similarity index 100% rename from libs/common/src/test/resources/org/opensearch/bootstrap/duplicate-xmlbeans-classes.jar rename to libs/common/src/test/resources/org/opensearch/common/bootstrap/duplicate-xmlbeans-classes.jar diff --git a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java index fe783e5ddb675..d999d20537485 100644 --- a/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java +++ b/plugins/ingest-attachment/src/main/java/org/opensearch/ingest/attachment/TikaImpl.java @@ -41,8 +41,8 @@ import org.apache.tika.parser.ParserDecorator; import org.opensearch.SpecialPermission; import org.opensearch.bootstrap.FilePermissionUtils; -import org.opensearch.bootstrap.JarHell; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.io.PathUtils; import java.io.ByteArrayInputStream; diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 4e167d10b99fa..95498f2bcbcd1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -47,6 +47,7 @@ import org.opensearch.cli.UserException; import 
org.opensearch.common.PidFile; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.inject.CreationException; import org.opensearch.common.logging.LogConfigurator; import org.opensearch.common.logging.Loggers; diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 563a026109059..acf2d7ec6a5ac 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -34,6 +34,7 @@ import org.opensearch.cli.Command; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index 4ff699e8017ba..323e061aea567 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -36,8 +36,8 @@ import com.fasterxml.jackson.core.json.JsonReadFeature; import org.opensearch.Version; -import org.opensearch.bootstrap.JarHell; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.xcontent.json.JsonXContentParser; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 9bc1f1334122e..72b8ada94a0d1 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -43,7 +43,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; -import org.opensearch.bootstrap.JarHell; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.collect.Tuple; import org.opensearch.common.inject.Module; import org.opensearch.common.lifecycle.LifecycleComponent; diff --git a/libs/common/src/test/resources/org/opensearch/bootstrap/test.policy b/server/src/main/resources/org/opensearch/bootstrap/test.policy similarity index 100% rename from libs/common/src/test/resources/org/opensearch/bootstrap/test.policy rename to server/src/main/resources/org/opensearch/bootstrap/test.policy diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index f5702fa1a7ade..cb549eafc0d21 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -38,7 +38,7 @@ import org.apache.lucene.util.Constants; import org.opensearch.LegacyESVersion; import org.opensearch.Version; -import org.opensearch.bootstrap.JarHell; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; diff --git a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java index 933385dedcf49..76c7ce0628aac 
100644 --- a/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java +++ b/test/framework/src/main/java/org/opensearch/bootstrap/BootstrapForTesting.java @@ -39,6 +39,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.bootstrap.JarHell; import org.opensearch.common.io.PathUtils; import org.opensearch.common.network.IfConfig; import org.opensearch.common.network.NetworkAddress; From 7e5108834191f1ac62319354b854e2a7e6170cca Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Mon, 27 Jan 2025 09:54:33 +0530 Subject: [PATCH 13/48] Fix flaky test RemoteSegmentTransferTrackerTests.testGetInflightUploadBytes (#17128) Signed-off-by: Sachin Kale --- .../index/remote/RemoteSegmentTransferTrackerTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index f4101cb054687..a130e4a6361fc 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -258,7 +258,7 @@ public void testGetInflightUploadBytes() { directoryFileTransferTracker, remoteStoreStatsTrackerFactory.getMovingAverageWindowSize() ); - long bytesStarted = randomLongBetween(10000, 100000); + long bytesStarted = randomLongBetween(12000, 100000); long bytesSucceeded = randomLongBetween(1000, 10000); long bytesFailed = randomLongBetween(100, 1000); transferTracker.addUploadBytesStarted(bytesStarted); From 3032bef54d502836789ea438f464ae0b1ba978b2 Mon Sep 17 00:00:00 2001 From: Sooraj Sinha <81695996+soosinha@users.noreply.github.com> Date: Mon, 27 Jan 2025 11:55:50 +0530 Subject: [PATCH 14/48] Add opensearch version info while deserialization (#16494) Signed-off-by: Sooraj Sinha --- CHANGELOG.md | 1 + .../gateway/remote/RemoteClusterStateService.java | 3 ++- .../gateway/remote/RemoteGlobalMetadataManager.java | 3 ++- .../gateway/remote/model/RemoteCustomMetadata.java | 12 +++++++----- .../remote/RemoteGlobalMetadataManagerTests.java | 11 ++++++++++- .../remote/model/RemoteCustomMetadataTests.java | 12 ++++++++---- 6 files changed, 30 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec7742b8563bc..01b58b0426bbb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Stop processing search requests when _msearch request is cancelled ([#17005](https://github.com/opensearch-project/OpenSearch/pull/17005)) - Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) +- Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) ### Security diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 778ab3e56cf76..62a06615f36dd 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java 
+++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -1281,7 +1281,8 @@ ClusterState readClusterStateInParallel( entry.getKey(), clusterUUID, blobStoreRepository.getCompressor(), - namedWriteableRegistry + namedWriteableRegistry, + manifest.getOpensearchVersion() ), listener ); diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java index 763a8e3ff4951..c7434144e10f8 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManager.java @@ -242,7 +242,8 @@ Metadata getGlobalMetadata(String clusterUUID, ClusterMetadataManifest clusterMe key, clusterUUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + clusterMetadataManifest.getOpensearchVersion() ); builder.putCustom(key, (Custom) getStore(remoteCustomMetadata).read(remoteCustomMetadata)); } catch (IOException e) { diff --git a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java index 8e850e903954a..03055a0be0e64 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java +++ b/server/src/main/java/org/opensearch/gateway/remote/model/RemoteCustomMetadata.java @@ -8,6 +8,7 @@ package org.opensearch.gateway.remote.model; +import org.opensearch.Version; import org.opensearch.cluster.metadata.Metadata.Custom; import org.opensearch.common.io.Streams; import org.opensearch.common.remote.AbstractClusterMetadataWriteableBlobEntity; @@ -67,16 +68,17 @@ public RemoteCustomMetadata( final String customType, final String clusterUUID, final Compressor compressor, - final NamedWriteableRegistry namedWriteableRegistry + final NamedWriteableRegistry namedWriteableRegistry, + final Version version ) { super(clusterUUID, compressor, null); this.blobName = blobName; this.customType = customType; this.namedWriteableRegistry = namedWriteableRegistry; - this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>( - "custom", - is -> readFrom(is, namedWriteableRegistry, customType) - ); + this.customBlobStoreFormat = new ChecksumWritableBlobStoreFormat<>("custom", is -> { + is.setVersion(version); + return readFrom(is, namedWriteableRegistry, customType); + }); } @Override diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java index a2da1e8b0fdb2..591fbf31a3021 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java @@ -8,6 +8,7 @@ package org.opensearch.gateway.remote; +import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; @@ -480,6 +481,12 @@ public void testGetAsyncWriteRunnable_TemplatesMetadata() throws Exception { } public void testGetAsyncReadRunnable_CustomMetadata() throws Exception { + for (Version version : List.of(Version.CURRENT, Version.V_2_15_0, Version.V_2_13_0)) { + verifyCustomMetadataReadForVersion(version); + } + } + + private void verifyCustomMetadataReadForVersion(Version version) throws Exception { 
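+ // Reads the serialized custom metadata with StreamInput#setVersion applied
+ // first (see the RemoteCustomMetadata change above), so blobs written by
+ // the older 2.x versions exercised here deserialize through their
+ // version-specific wire format.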
Metadata.Custom customMetadata = getCustomMetadata(); String fileName = randomAlphaOfLength(10); RemoteCustomMetadata customMetadataForDownload = new RemoteCustomMetadata( @@ -487,7 +494,8 @@ public void testGetAsyncReadRunnable_CustomMetadata() throws Exception { IndexGraveyard.TYPE, CLUSTER_UUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + version ); when(blobStoreTransferService.downloadBlob(anyIterable(), anyString())).thenReturn( customMetadataForDownload.customBlobStoreFormat.serialize(customMetadata, fileName, compressor).streamInput() @@ -695,4 +703,5 @@ public void testGetUpdatedCustoms() { assertThat(customsDiff.getUpserts(), is(expectedUpserts)); assertThat(customsDiff.getDeletes(), is(List.of(CustomMetadata1.TYPE))); } + } diff --git a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java index 60cceb205f43d..46c0b4d360665 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/model/RemoteCustomMetadataTests.java @@ -106,7 +106,8 @@ public void testClusterUUID() { "test-custom", clusterUUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + Version.CURRENT ); assertThat(remoteObjectForDownload.clusterUUID(), is(clusterUUID)); } @@ -128,7 +129,8 @@ public void testFullBlobName() { "test-custom", clusterUUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + Version.CURRENT ); assertThat(remoteObjectForDownload.getFullBlobName(), is(TEST_BLOB_NAME)); } @@ -150,7 +152,8 @@ public void testBlobFileName() { "test-custom", clusterUUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + Version.CURRENT ); assertThat(remoteObjectForDownload.getBlobFileName(), is(TEST_BLOB_FILE_NAME)); } @@ -162,7 +165,8 @@ public void testBlobPathTokens() { "test-custom", clusterUUID, compressor, - namedWriteableRegistry + namedWriteableRegistry, + Version.CURRENT ); assertThat(remoteObjectForDownload.getBlobPathTokens(), is(new String[] { "user", "local", "opensearch", "customMetadata" })); } From 8a4945d751e4f3177f314f25e0ae1da4ef134d71 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Mon, 27 Jan 2025 12:06:57 -0800 Subject: [PATCH 15/48] Remove package `org.opensearch.action.support.master` (#17118) All classes in this package have been deprecated and can be removed for the 3.0 release. 
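Downstream code only needs an import swap, since the clustermanager classes keep the same API (the removed master classes were thin deprecated subclasses); a minimal sketch, with the listener wiring purely illustrative rather than taken from this change:

    // Removed in 3.0:
    // import org.opensearch.action.support.master.AcknowledgedResponse;
    import org.opensearch.action.support.clustermanager.AcknowledgedResponse;
    import org.opensearch.core.action.ActionListener;

    // Any handler consuming an acknowledged response compiles unchanged
    // after the import swap.
    ActionListener<AcknowledgedResponse> listener = ActionListener.wrap(
        response -> System.out.println("acknowledged: " + response.isAcknowledged()),
        e -> { throw new RuntimeException(e); }
    );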
Signed-off-by: Andrew Ross --- CHANGELOG-3.0.md | 2 +- .../admin/indices/get/GetIndexRequest.java | 2 +- .../indices/get/GetIndexRequestBuilder.java | 2 +- .../mapping/get/GetMappingsRequest.java | 2 +- .../get/GetMappingsRequestBuilder.java | 2 +- .../clustermanager/AcknowledgedRequest.java | 3 +- .../AcknowledgedRequestBuilder.java | 3 +- .../MasterNodeOperationRequestBuilder.java | 56 ------------ ...MasterNodeReadOperationRequestBuilder.java | 56 ------------ .../support/master/MasterNodeReadRequest.java | 53 ----------- .../support/master/MasterNodeRequest.java | 54 ----------- .../master/TransportMasterNodeAction.java | 88 ------------------ .../master/TransportMasterNodeReadAction.java | 89 ------------------- .../master/info/ClusterInfoRequest.java | 53 ----------- .../info/ClusterInfoRequestBuilder.java | 52 ----------- .../info/TransportClusterInfoAction.java | 62 ------------- .../support/master/info/package-info.java | 15 ---- .../action/support/master/package-info.java | 15 ---- .../indices/get/GetIndexRequestTests.java | 21 ----- 19 files changed, 7 insertions(+), 623 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/info/package-info.java delete mode 100644 server/src/main/java/org/opensearch/action/support/master/package-info.java delete mode 100644 server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 727684b9542b4..8d8adfd1e3566 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -47,7 +47,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remove Version.V_1_ Constants ([#5021](https://github.com/opensearch-project/OpenSearch/pull/5021)) - Remove custom Map, List and Set collection classes ([#6871](https://github.com/opensearch-project/OpenSearch/pull/6871)) - Remove `index.store.hybrid.mmap.extensions` setting in favor of `index.store.hybrid.nio.extensions` setting ([#9392](https://github.com/opensearch-project/OpenSearch/pull/9392)) -- Move o.o.action.support.master classes ([#17104](https://github.com/opensearch-project/OpenSearch/pull/17104)) +- Remove package org.opensearch.action.support.master ([#4856](https://github.com/opensearch-project/OpenSearch/issues/4856)) ### Fixed - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java 
b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index 601b53f88baa3..da3d9bc9dcbbd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.util.ArrayUtils; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index e97319abe5f98..c90d46808a53b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index cd0ecdb30e5fa..72c5be85cdf92 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 36ca1cb088cb5..e97c0befebd9d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.annotation.PublicApi; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java index 4543c9392e62b..9c5973bd2575e 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java +++ 
b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.support.clustermanager; -import org.opensearch.action.support.master.MasterNodeRequest; import org.opensearch.cluster.ack.AckedRequest; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.StreamInput; @@ -48,7 +47,7 @@ * * @opensearch.internal */ -public abstract class AcknowledgedRequest> extends MasterNodeRequest +public abstract class AcknowledgedRequest> extends ClusterManagerNodeRequest implements AckedRequest { diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java index ea50774d17d19..fa957f159ec9d 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; @@ -44,7 +43,7 @@ public abstract class AcknowledgedRequestBuilder< Request extends AcknowledgedRequest, Response extends AcknowledgedResponse, - RequestBuilder extends AcknowledgedRequestBuilder> extends MasterNodeOperationRequestBuilder< + RequestBuilder extends AcknowledgedRequestBuilder> extends ClusterManagerNodeOperationRequestBuilder< Request, Response, RequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java deleted file mode 100644 index 0acbd998a6322..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java
deleted file mode 100644
index 0acbd998a6322..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.ActionType;
-import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
-import org.opensearch.client.OpenSearchClient;
-import org.opensearch.core.action.ActionResponse;
-
-/**
- * Base request builder for cluster-manager node operations
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeOperationRequestBuilder}
- */
-@Deprecated
-public abstract class MasterNodeOperationRequestBuilder<
-    Request extends MasterNodeRequest<Request>,
-    Response extends ActionResponse,
-    RequestBuilder extends MasterNodeOperationRequestBuilder<Request, Response, RequestBuilder>> extends
-    ClusterManagerNodeOperationRequestBuilder<Request, Response, RequestBuilder> {
-
-    protected MasterNodeOperationRequestBuilder(OpenSearchClient client, ActionType<Response> action, Request request) {
-        super(client, action, request);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java
deleted file mode 100644
index 36a3fc1d2de73..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.ActionType;
-import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder;
-import org.opensearch.client.OpenSearchClient;
-import org.opensearch.core.action.ActionResponse;
-
-/**
- * Base request builder for cluster-manager node read operations that can be executed on the local node as well
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeReadOperationRequestBuilder}
- */
-@Deprecated
-public abstract class MasterNodeReadOperationRequestBuilder<
-    Request extends MasterNodeReadRequest<Request>,
-    Response extends ActionResponse,
-    RequestBuilder extends MasterNodeReadOperationRequestBuilder<Request, Response, RequestBuilder>> extends
-    ClusterManagerNodeReadOperationRequestBuilder<Request, Response, RequestBuilder> {
-
-    protected MasterNodeReadOperationRequestBuilder(OpenSearchClient client, ActionType<Response> action, Request request) {
-        super(client, action, request);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java
deleted file mode 100644
index ea8f6a6e43cfe..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;
-import org.opensearch.core.common.io.stream.StreamInput;
-
-import java.io.IOException;
-
-/**
- * Base request for cluster-manager based read operations that allows to read the cluster state from the local node if needed
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeReadRequest}
- */
-@Deprecated
-public abstract class MasterNodeReadRequest<Request extends MasterNodeReadRequest<Request>> extends ClusterManagerNodeReadRequest<Request> {
-    protected MasterNodeReadRequest() {}
-
-    protected MasterNodeReadRequest(StreamInput in) throws IOException {
-        super(in);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java
deleted file mode 100644
index cfab63a845f7f..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest;
-import org.opensearch.core.common.io.stream.StreamInput;
-
-import java.io.IOException;
-
-/**
- * A based request for cluster-manager based operation.
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeRequest}
- */
-@Deprecated
-public abstract class MasterNodeRequest<Request extends MasterNodeRequest<Request>> extends ClusterManagerNodeRequest<Request> {
-
-    protected MasterNodeRequest() {}
-
-    protected MasterNodeRequest(StreamInput in) throws IOException {
-        super(in);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java
deleted file mode 100644
index eec7965bfed02..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
-import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.transport.TransportService;
-
-/**
- * A base class for operations that needs to be performed on the cluster-manager node.
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link TransportClusterManagerNodeAction}
- */
-@Deprecated
-public abstract class TransportMasterNodeAction<Request extends MasterNodeRequest<Request>, Response extends ActionResponse> extends
-    TransportClusterManagerNodeAction<Request, Response> {
-
-    protected TransportMasterNodeAction(
-        String actionName,
-        TransportService transportService,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ActionFilters actionFilters,
-        Writeable.Reader<Request> request,
-        IndexNameExpressionResolver indexNameExpressionResolver
-    ) {
-        super(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver);
-    }
-
-    protected TransportMasterNodeAction(
-        String actionName,
-        boolean canTripCircuitBreaker,
-        TransportService transportService,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ActionFilters actionFilters,
-        Writeable.Reader<Request> request,
-        IndexNameExpressionResolver indexNameExpressionResolver
-    ) {
-        super(
-            actionName,
-            canTripCircuitBreaker,
-            transportService,
-            clusterService,
-            threadPool,
-            actionFilters,
-            request,
-            indexNameExpressionResolver
-        );
-    }
-
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java
deleted file mode 100644
index b95459971737f..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master;
-
-import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction;
-import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.transport.TransportService;
-
-/**
- * A base class for read operations that needs to be performed on the cluster-manager node.
- * Can also be executed on the local node if needed.
- *
- * @opensearch.internal
- * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link TransportClusterManagerNodeReadAction}
- */
-@Deprecated
-public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse> extends
-    TransportClusterManagerNodeReadAction<Request, Response> {
-
-    protected TransportMasterNodeReadAction(
-        String actionName,
-        TransportService transportService,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ActionFilters actionFilters,
-        Writeable.Reader<Request> request,
-        IndexNameExpressionResolver indexNameExpressionResolver
-    ) {
-        super(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver);
-    }
-
-    protected TransportMasterNodeReadAction(
-        String actionName,
-        boolean checkSizeLimit,
-        TransportService transportService,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ActionFilters actionFilters,
-        Writeable.Reader<Request> request,
-        IndexNameExpressionResolver indexNameExpressionResolver
-    ) {
-        super(
-            actionName,
-            checkSizeLimit,
-            transportService,
-            clusterService,
-            threadPool,
-            actionFilters,
-            request,
-            indexNameExpressionResolver
-        );
-    }
-
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java
deleted file mode 100644
index 0b66e3d932603..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master.info;
-
-import org.opensearch.core.common.io.stream.StreamInput;
-
-import java.io.IOException;
-
-/**
- * Transport request for cluster information
- *
- * @opensearch.internal
- */
-public abstract class ClusterInfoRequest<Request extends ClusterInfoRequest<Request>> extends
-    org.opensearch.action.support.clustermanager.info.ClusterInfoRequest<Request> {
-
-    public ClusterInfoRequest() {}
-
-    public ClusterInfoRequest(StreamInput in) throws IOException {
-        super(in);
-    }
-
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java
deleted file mode 100644
index 091413c0df6d7..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master.info;
-
-import org.opensearch.action.ActionType;
-import org.opensearch.client.OpenSearchClient;
-import org.opensearch.core.action.ActionResponse;
-
-/**
- * Transport request builder for cluster information
- *
- * @opensearch.internal
- */
-public abstract class ClusterInfoRequestBuilder<
-    Request extends ClusterInfoRequest<Request>,
-    Response extends ActionResponse,
-    Builder extends ClusterInfoRequestBuilder<Request, Response, Builder>> extends
-    org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder<Request, Response, Builder> {
-
-    protected ClusterInfoRequestBuilder(OpenSearchClient client, ActionType<Response> action, Request request) {
-        super(client, action, request);
-    }
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java
deleted file mode 100644
index 2653e3a658674..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
- */
-
-package org.opensearch.action.support.master.info;
-
-import org.opensearch.action.support.ActionFilters;
-import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
-import org.opensearch.cluster.service.ClusterService;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.threadpool.ThreadPool;
-import org.opensearch.transport.TransportService;
-
-/**
- * Perform cluster information action
- *
- * @opensearch.internal
- */
-public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequest<Request>, Response extends ActionResponse> extends
-    org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction<Request, Response> {
-
-    public TransportClusterInfoAction(
-        String actionName,
-        TransportService transportService,
-        ClusterService clusterService,
-        ThreadPool threadPool,
-        ActionFilters actionFilters,
-        Writeable.Reader<Request> request,
-        IndexNameExpressionResolver indexNameExpressionResolver
-    ) {
-        super(actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver);
-    }
-
-}
diff --git a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java b/server/src/main/java/org/opensearch/action/support/master/info/package-info.java
deleted file mode 100644
index 8f21383c1b90c..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Master Node Information transport handlers.
- *
- * As of 2.1, because supporting inclusive language, replaced by {@link org.opensearch.action.support.clustermanager.info}
- */
-@Deprecated
-package org.opensearch.action.support.master.info;
diff --git a/server/src/main/java/org/opensearch/action/support/master/package-info.java b/server/src/main/java/org/opensearch/action/support/master/package-info.java
deleted file mode 100644
index 9e90d96986fe1..0000000000000
--- a/server/src/main/java/org/opensearch/action/support/master/package-info.java
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/**
- * Master Node transport handlers.
- *
- * As of 2.1, because supporting inclusive language, replaced by {@link org.opensearch.action.support.clustermanager}
- */
-@Deprecated
-package org.opensearch.action.support.master;
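With the deprecated org.opensearch.action.support.master shims deleted above, any downstream code still extending them migrates by swapping the superclass for its cluster-manager counterpart; the type parameters and constructor chains are otherwise identical. A minimal before/after sketch (WidgetStatsRequest is an invented name, not part of this patch):

    import org.opensearch.action.ActionRequestValidationException;
    import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest;

    // Previously: public class WidgetStatsRequest extends MasterNodeReadRequest<WidgetStatsRequest> { ... }
    // The only source change is the superclass name:
    public class WidgetStatsRequest extends ClusterManagerNodeReadRequest<WidgetStatsRequest> {
        @Override
        public ActionRequestValidationException validate() {
            return null; // this sketch carries no parameters to validate
        }
    }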
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java
deleted file mode 100644
index f0d3db71c27b7..0000000000000
--- a/server/src/test/java/org/opensearch/action/admin/indices/get/GetIndexRequestTests.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.action.admin.indices.get;
-
-import org.opensearch.action.support.master.info.ClusterInfoRequest;
-import org.opensearch.test.OpenSearchTestCase;
-
-import static org.hamcrest.Matchers.is;
-
-public class GetIndexRequestTests extends OpenSearchTestCase {
-    public void testGetIndexRequestExtendsClusterInfoRequestOfDeprecatedClassPath() {
-        GetIndexRequest getIndexRequest = new GetIndexRequest().indices("test");
-        assertThat(getIndexRequest instanceof ClusterInfoRequest, is(true));
-    }
-}

From e6fc6008e408c41e2369ca308d5a763575ef8c89 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Mon, 27 Jan 2025 15:35:15 -0500
Subject: [PATCH 16/48] Update Netty to 4.1.117.Final (#17041)

Signed-off-by: Andriy Redko
---
 CHANGELOG.md | 2 +- gradle/libs.versions.toml | 2 +- .../licenses/netty-buffer-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.117.Final.jar.sha1 | 1 +
.../licenses/netty-handler-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.117.Final.jar.sha1 | 1 + ...tty-transport-classes-epoll-4.1.115.Final.jar.sha1 | 1 - ...tty-transport-classes-epoll-4.1.117.Final.jar.sha1 | 1 + ...ransport-native-unix-common-4.1.115.Final.jar.sha1 | 1 - ...ransport-native-unix-common-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.117.Final.jar.sha1 | 1 + ...ransport-native-unix-common-4.1.115.Final.jar.sha1 | 1 - ...ransport-native-unix-common-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-buffer-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.117.Final.jar.sha1 | 1 + .../netty-resolver-dns-4.1.115.Final.jar.sha1 | 1 - .../netty-resolver-dns-4.1.117.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.115.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.117.Final.jar.sha1 | 1 + ...ransport-native-unix-common-4.1.115.Final.jar.sha1 | 1 - ...ransport-native-unix-common-4.1.117.Final.jar.sha1 | 1 + 98 files changed, 60 insertions(+), 50 deletions(-) delete mode 100644 libs/arrow-spi/licenses/netty-buffer-4.1.115.Final.jar.sha1 create mode 100644 libs/arrow-spi/licenses/netty-buffer-4.1.117.Final.jar.sha1 delete mode 100644 libs/arrow-spi/licenses/netty-common-4.1.115.Final.jar.sha1 create mode 100644 libs/arrow-spi/licenses/netty-common-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 
create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.115.Final.jar.sha1 create mode 100644 
plugins/repository-s3/licenses/netty-handler-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.117.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 delete mode 100644 
plugins/transport-reactor-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 01b58b0426bbb..4b5d3c8793dc3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,7 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655)) -- Bump `Netty` from 4.1.114.Final to 4.1.115.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661)) +- Bump `Netty` from 4.1.114.Final to 4.1.117.Final ([#16661](https://github.com/opensearch-project/OpenSearch/pull/16661), [#17041](https://github.com/opensearch-project/OpenSearch/pull/17041)) - Bump `org.xerial.snappy:snappy-java` from 1.1.10.6 to 1.1.10.7 ([#16665](https://github.com/opensearch-project/OpenSearch/pull/16665)) - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667)) - Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858)) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index c2afc85f79078..2230239983a01 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -32,7 +32,7 @@ grpc = "1.68.2" # when updating the JNA version, also update the version in buildSrc/build.gradle jna = "5.13.0" -netty = "4.1.115.Final" +netty = "4.1.117.Final" joda = "2.12.7" roaringbitmap = "1.3.0" diff --git a/libs/arrow-spi/licenses/netty-buffer-4.1.115.Final.jar.sha1 b/libs/arrow-spi/licenses/netty-buffer-4.1.115.Final.jar.sha1 deleted file mode 100644 index 825b91b71601d..0000000000000 --- a/libs/arrow-spi/licenses/netty-buffer-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5daf1030e5c36d198caf7562da2441a97ec0df6 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/netty-buffer-4.1.117.Final.jar.sha1 b/libs/arrow-spi/licenses/netty-buffer-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..9d10d066cb497 --- /dev/null +++ b/libs/arrow-spi/licenses/netty-buffer-4.1.117.Final.jar.sha1 @@ 
-0,0 +1 @@ +022b4cc28194cb23671274499229e0ef35028fbd \ No newline at end of file diff --git a/libs/arrow-spi/licenses/netty-common-4.1.115.Final.jar.sha1 b/libs/arrow-spi/licenses/netty-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 2ff84dc14147d..0000000000000 --- a/libs/arrow-spi/licenses/netty-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9da10a9f72e3f87e181d91b525174007a6fc4f11 \ No newline at end of file diff --git a/libs/arrow-spi/licenses/netty-common-4.1.117.Final.jar.sha1 b/libs/arrow-spi/licenses/netty-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..b399dfc5a6b14 --- /dev/null +++ b/libs/arrow-spi/licenses/netty-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +9e074a4382f56b37f3b9ee1fc21d53e7af58ec9d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 deleted file mode 100644 index 825b91b71601d..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5daf1030e5c36d198caf7562da2441a97ec0df6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..9d10d066cb497 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +022b4cc28194cb23671274499229e0ef35028fbd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 deleted file mode 100644 index e39392f923f7b..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d326bf3a4c785b272da3db6941779a1bd5448378 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..2ac134b2057fb --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +2831d3431ed93d9c0b64b1c0cce2ced4737539aa \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 deleted file mode 100644 index ce4bdc323b2a0..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80f0dece29a2c0269217e8dd1b6db6ff9710781f \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..222d2e3acc03e --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +5a0f8cd908b8b09b2cd1d39c1d2086a4d12e6029 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 deleted file mode 100644 index f718fc9bf1622..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0bc474c27c96e3a309da73160fbcfe0bd3aa85bc \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..fd83790095b7d --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +e0a678ac80e00b08a4c0118d496efabc4516ebbf \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 2ff84dc14147d..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9da10a9f72e3f87e181d91b525174007a6fc4f11 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..b399dfc5a6b14 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +9e074a4382f56b37f3b9ee1fc21d53e7af58ec9d \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 deleted file mode 100644 index 5c58e02209691..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d54dbf68b9d88a98240107758c6b63da5e46e23a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0c06e7a876610 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +db14cd99515f8c98a3f2a347718e59f14d85c503 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 deleted file mode 100644 index b1d9f1f679966..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e33b4d476c03975957f5d8d0319d592bf2bc5e96 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..390e9268d0b61 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +581b37489a03162f473264b65f53d504269a74b0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 deleted file mode 100644 index aeeda9c282b30..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39cef77c1a25908ac1abf4960c2e789f0bf70ff9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..ef3a353eeb56c --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +f81d72962bd134d8d8e11b514321134fa5fd0ce6 \ No newline at end of file diff --git 
a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 41996121685f0..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc96c67d06cd6b5eb677f2728f27bf2e3d9a7284 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..792339131bf29 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +684f2316ff2b2171babbc17c95ac3bd97f5f091e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 deleted file mode 100644 index fc735d97148ef..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d39b9866939cbbb8ae3a1af5c1df5ddf93656d47 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..283d03512f2ab --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +5023de39015bdc7a5740dcae109fb0a72160c207 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 deleted file mode 100644 index f718fc9bf1622..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0bc474c27c96e3a309da73160fbcfe0bd3aa85bc \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..fd83790095b7d --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +e0a678ac80e00b08a4c0118d496efabc4516ebbf \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.115.Final.jar.sha1 deleted file mode 100644 index 8a617d4d857cf..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -955faadc8f5b1ca41881398043b62ce1a245800c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..fc00e4a6fbe08 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +c7e72e5ec8e349e15e2d38811696f6404e0a49e0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.115.Final.jar.sha1 deleted file mode 100644 index 13156914a5fd6..0000000000000 --- 
a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5f61f44704a09b373167f1c0b1854bdcd880a8cb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..f44eccbf8b1dd --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +f032fb832d421d9906c705a1d328188723961c74 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 deleted file mode 100644 index e443fa3fa383f..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -33b0636c6fe36c7f41d9da204aa1ad94ff244ac7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0466494805bed --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +54c433e75a6e0f101ac78332a9958cd200ca2434 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 41996121685f0..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc96c67d06cd6b5eb677f2728f27bf2e3d9a7284 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..792339131bf29 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +684f2316ff2b2171babbc17c95ac3bd97f5f091e \ No newline at end of file
diff --git a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
index 4f30247f0af08..19c9af317247f 100644
--- a/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
+++ b/plugins/repository-azure/src/main/java/org/opensearch/repositories/azure/AzureStorageService.java
@@ -445,7 +445,16 @@ private static class NioThreadFactory implements ThreadFactory {
         }
 
         public Thread newThread(Runnable r) {
-            final Thread t = new Thread(group, r, namePrefix + threadNumber.getAndIncrement(), 0);
+            final Thread t = new Thread(group, new Runnable() {
+                @SuppressWarnings({ "deprecation", "removal" })
+                @Override
+                public void run() {
+                    AccessController.doPrivileged((PrivilegedAction<?>) () -> {
+                        r.run();
+                        return null;
+                    });
+                }
+            }, namePrefix + threadNumber.getAndIncrement(), 0);
 
             if (t.isDaemon()) {
                 t.setDaemon(false);
diff --git a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
index eedcfd98da150..9dc854489c716 100644
--- a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy
@@ -39,6 +39,7 @@ grant {
   permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
   permission java.lang.RuntimePermission "setContextClassLoader";
   permission java.lang.RuntimePermission "shutdownHooks";
+  permission java.lang.RuntimePermission "getClassLoader";
 
   // azure client set Authenticator for proxy username/password
   permission java.net.NetPermission "setDefaultAuthenticator";
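The newThread() hunk above wraps every factory-created task in AccessController.doPrivileged, and the policy file gains a getClassLoader permission; read together, the apparent intent is to run the Azure client's worker threads under the plugin's own protection domain rather than whatever caller context reaches the factory (an inference, the commit message does not say). The wrapper pattern in isolation, as a standalone sketch:

    import java.security.AccessController;
    import java.security.PrivilegedAction;

    public final class PrivilegedRunnables {
        private PrivilegedRunnables() {}

        // java.security.AccessController is deprecated for removal on recent JDKs,
        // hence the suppression, mirroring the annotation in the hunk above.
        @SuppressWarnings({ "deprecation", "removal" })
        public static Runnable wrap(Runnable r) {
            // Permission checks inside r stop at this frame's protection domain.
            return () -> AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
                r.run();
                return null;
            });
        }
    }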
diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.115.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.115.Final.jar.sha1 deleted file mode 100644 index 4491099ace714..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d65f327e8406f80f744060e10135dd5f61a369a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.117.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..8e20e676f3821 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +61dfea8203f3b653e79959bc259adf848b998d8c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.115.Final.jar.sha1 deleted file mode 100644 index 825b91b71601d..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5daf1030e5c36d198caf7562da2441a97ec0df6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..9d10d066cb497 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +022b4cc28194cb23671274499229e0ef35028fbd \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.115.Final.jar.sha1 deleted file mode 100644 index e39392f923f7b..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d326bf3a4c785b272da3db6941779a1bd5448378 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..2ac134b2057fb --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +2831d3431ed93d9c0b64b1c0cce2ced4737539aa \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.115.Final.jar.sha1 deleted file mode 100644 index ce4bdc323b2a0..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80f0dece29a2c0269217e8dd1b6db6ff9710781f \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..222d2e3acc03e --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@
+5a0f8cd908b8b09b2cd1d39c1d2086a4d12e6029 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 deleted file mode 100644 index f718fc9bf1622..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0bc474c27c96e3a309da73160fbcfe0bd3aa85bc \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..fd83790095b7d --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +e0a678ac80e00b08a4c0118d496efabc4516ebbf \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 2ff84dc14147d..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9da10a9f72e3f87e181d91b525174007a6fc4f11 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..b399dfc5a6b14 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +9e074a4382f56b37f3b9ee1fc21d53e7af58ec9d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.115.Final.jar.sha1 deleted file mode 100644 index 5c58e02209691..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d54dbf68b9d88a98240107758c6b63da5e46e23a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0c06e7a876610 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +db14cd99515f8c98a3f2a347718e59f14d85c503 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.115.Final.jar.sha1 deleted file mode 100644 index b1d9f1f679966..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e33b4d476c03975957f5d8d0319d592bf2bc5e96 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..390e9268d0b61 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +581b37489a03162f473264b65f53d504269a74b0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.115.Final.jar.sha1 deleted file mode 100644 index aeeda9c282b30..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39cef77c1a25908ac1abf4960c2e789f0bf70ff9 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-transport-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..ef3a353eeb56c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +f81d72962bd134d8d8e11b514321134fa5fd0ce6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.115.Final.jar.sha1 deleted file mode 100644 index aea007d98763e..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -11fea00408ecbd8b8d1f0698d708e37db4a01841 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..dc60636a6611c --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +0ebca585acd227b8682ed5b2aafbb86d07f77848 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 41996121685f0..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc96c67d06cd6b5eb677f2728f27bf2e3d9a7284 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..792339131bf29 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +684f2316ff2b2171babbc17c95ac3bd97f5f091e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.115.Final.jar.sha1 deleted file mode 100644 index 825b91b71601d..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5daf1030e5c36d198caf7562da2441a97ec0df6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..9d10d066cb497 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +022b4cc28194cb23671274499229e0ef35028fbd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.115.Final.jar.sha1 deleted file mode 100644 index e39392f923f7b..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d326bf3a4c785b272da3db6941779a1bd5448378 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..2ac134b2057fb --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.117.Final.jar.sha1 @@ -0,0 +1 
@@ +2831d3431ed93d9c0b64b1c0cce2ced4737539aa \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.115.Final.jar.sha1 deleted file mode 100644 index ce4bdc323b2a0..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80f0dece29a2c0269217e8dd1b6db6ff9710781f \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..222d2e3acc03e --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +5a0f8cd908b8b09b2cd1d39c1d2086a4d12e6029 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 2ff84dc14147d..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9da10a9f72e3f87e181d91b525174007a6fc4f11 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..b399dfc5a6b14 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +9e074a4382f56b37f3b9ee1fc21d53e7af58ec9d \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.115.Final.jar.sha1 deleted file mode 100644 index 5c58e02209691..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d54dbf68b9d88a98240107758c6b63da5e46e23a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0c06e7a876610 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +db14cd99515f8c98a3f2a347718e59f14d85c503 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.115.Final.jar.sha1 deleted file mode 100644 index b1d9f1f679966..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e33b4d476c03975957f5d8d0319d592bf2bc5e96 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..390e9268d0b61 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +581b37489a03162f473264b65f53d504269a74b0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.115.Final.jar.sha1 deleted file mode 100644 index aeeda9c282b30..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39cef77c1a25908ac1abf4960c2e789f0bf70ff9 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-transport-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..ef3a353eeb56c --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +f81d72962bd134d8d8e11b514321134fa5fd0ce6 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 41996121685f0..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc96c67d06cd6b5eb677f2728f27bf2e3d9a7284 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..792339131bf29 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +684f2316ff2b2171babbc17c95ac3bd97f5f091e \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 deleted file mode 100644 index 825b91b71601d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5daf1030e5c36d198caf7562da2441a97ec0df6 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..9d10d066cb497 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-buffer-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +022b4cc28194cb23671274499229e0ef35028fbd \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 deleted file mode 100644 index e39392f923f7b..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d326bf3a4c785b272da3db6941779a1bd5448378 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..2ac134b2057fb --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +2831d3431ed93d9c0b64b1c0cce2ced4737539aa \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 deleted file mode 100644 index fc735d97148ef..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d39b9866939cbbb8ae3a1af5c1df5ddf93656d47 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..283d03512f2ab 
--- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-dns-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +5023de39015bdc7a5740dcae109fb0a72160c207 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 deleted file mode 100644 index ce4bdc323b2a0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -80f0dece29a2c0269217e8dd1b6db6ff9710781f \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..222d2e3acc03e --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +5a0f8cd908b8b09b2cd1d39c1d2086a4d12e6029 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 deleted file mode 100644 index f718fc9bf1622..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0bc474c27c96e3a309da73160fbcfe0bd3aa85bc \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..fd83790095b7d --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-codec-http2-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +e0a678ac80e00b08a4c0118d496efabc4516ebbf \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 2ff84dc14147d..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9da10a9f72e3f87e181d91b525174007a6fc4f11 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..b399dfc5a6b14 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +9e074a4382f56b37f3b9ee1fc21d53e7af58ec9d \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 deleted file mode 100644 index 5c58e02209691..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d54dbf68b9d88a98240107758c6b63da5e46e23a \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0c06e7a876610 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-handler-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +db14cd99515f8c98a3f2a347718e59f14d85c503 \ No newline at end of file diff --git 
a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 deleted file mode 100644 index b1d9f1f679966..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e33b4d476c03975957f5d8d0319d592bf2bc5e96 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..390e9268d0b61 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +581b37489a03162f473264b65f53d504269a74b0 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 deleted file mode 100644 index e443fa3fa383f..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -33b0636c6fe36c7f41d9da204aa1ad94ff244ac7 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..0466494805bed --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-resolver-dns-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +54c433e75a6e0f101ac78332a9958cd200ca2434 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 deleted file mode 100644 index aeeda9c282b30..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39cef77c1a25908ac1abf4960c2e789f0bf70ff9 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..ef3a353eeb56c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +f81d72962bd134d8d8e11b514321134fa5fd0ce6 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 deleted file mode 100644 index 41996121685f0..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.115.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc96c67d06cd6b5eb677f2728f27bf2e3d9a7284 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 new file mode 100644 index 0000000000000..792339131bf29 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/netty-transport-native-unix-common-4.1.117.Final.jar.sha1 @@ -0,0 +1 @@ +684f2316ff2b2171babbc17c95ac3bd97f5f091e \ No newline at end of file From b5234a54db3314d6d7c8ba75966b80f8d03ba61e Mon Sep 17 00:00:00 2001 From: 
Sandesh Kumar Date: Mon, 27 Jan 2025 13:48:19 -0800 Subject: [PATCH 17/48] [Star Tree] [Search] Resolve Date histogram with metric aggregation using star-tree (#16674) --------- Signed-off-by: Sandesh Kumar Co-authored-by: Sandesh Kumar --- CHANGELOG.md | 1 + .../java/org/opensearch/common/Rounding.java | 9 + .../datacube/DateDimension.java | 18 + .../startree/utils/StarTreeQueryHelper.java | 152 +++++--- .../aggregations/StarTreeBucketCollector.java | 75 ++++ .../StarTreePreComputeCollector.java | 32 ++ .../bucket/BucketsAggregator.java | 15 + .../histogram/DateHistogramAggregator.java | 129 ++++++- .../DateHistogramAggregatorFactory.java | 4 + .../aggregations/metrics/AvgAggregator.java | 55 ++- .../aggregations/metrics/MaxAggregator.java | 32 +- .../aggregations/metrics/MinAggregator.java | 32 +- .../aggregations/metrics/SumAggregator.java | 45 ++- .../metrics/ValueCountAggregator.java | 27 +- .../search/startree/StarTreeFilter.java | 18 +- .../search/SearchServiceStarTreeTests.java | 211 +++++++++- .../DateHistogramAggregatorTests.java | 361 ++++++++++++++++++ .../startree/StarTreeFilterTests.java | 3 +- .../aggregations/AggregatorTestCase.java | 3 +- 19 files changed, 1144 insertions(+), 78 deletions(-) create mode 100644 server/src/main/java/org/opensearch/search/aggregations/StarTreeBucketCollector.java create mode 100644 server/src/main/java/org/opensearch/search/aggregations/StarTreePreComputeCollector.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b5d3c8793dc3..29e2e09a16ac1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Added new Setting property UnmodifiableOnRestore to prevent updating settings on restore snapshot ([#16957](https://github.com/opensearch-project/OpenSearch/pull/16957)) - Introduce Template query ([#16818](https://github.com/opensearch-project/OpenSearch/pull/16818)) - Propagate the sourceIncludes and excludes fields from fetchSourceContext to FieldsVisitor. ([#17080](https://github.com/opensearch-project/OpenSearch/pull/17080)) +- [Star Tree] [Search] Resolving Date histogram with metric aggregation using star-tree ([#16674](https://github.com/opensearch-project/OpenSearch/pull/16674)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index 12a399635046e..5d1251e9bed7c 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -270,6 +270,10 @@ public void writeTo(StreamOutput out) throws IOException { public abstract byte id(); + public DateTimeUnit unit() { + return null; + } + /** * A strategy for rounding milliseconds since epoch. 
* @@ -517,6 +521,11 @@ public byte id() { return ID; } + @Override + public DateTimeUnit unit() { + return unit; + } + private LocalDateTime truncateLocalDateTime(LocalDateTime localDateTime) { switch (unit) { case SECOND_OF_MINUTE: diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java index 88a67e1134067..36dd42122c60e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DateDimension.java @@ -169,6 +169,24 @@ public int compare(DateTimeUnitRounding unit1, DateTimeUnitRounding unit2) { } } + /** + * Returns the closest valid calendar interval to be used for the search interval + */ + public DateTimeUnitRounding findClosestValidInterval(DateTimeUnitRounding searchInterval) { + DateTimeUnitComparator comparator = new DateTimeUnitComparator(); + DateTimeUnitRounding closestValidInterval = null; + + // Find the largest interval that is less than or equal to search interval + for (DateTimeUnitRounding interval : sortedCalendarIntervals) { + if (comparator.compare(interval, searchInterval) <= 0) { + closestValidInterval = interval; + } else { + break; + } + } + return closestValidInterval; + } + /** * Returns a sorted list of dateTimeUnits based on the DateTimeUnitComparator */ diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java index e46cf6f56b36e..e2414d9f6a8a1 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -17,10 +17,13 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.mapper.CompositeDataCubeFieldType; import org.opensearch.index.query.MatchAllQueryBuilder; @@ -28,7 +31,8 @@ import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.LeafBucketCollector; -import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.builder.SearchSourceBuilder; @@ -37,9 +41,10 @@ import 
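The `DateDimension#findClosestValidInterval` helper above is the heart of the date-histogram support: it falls back to the coarsest calendar interval the star-tree actually indexed that is still no coarser than the requested one, so the requested buckets can be rebuilt by re-rounding. A toy sketch of that selection rule (the enum and its ordering are illustrative stand-ins for `DateTimeUnitRounding` and `DateTimeUnitComparator`):

```java
import java.util.List;

// Toy stand-in for calendar units ordered fine -> coarse; the real code
// compares DateTimeUnitRounding instances via DateTimeUnitComparator.
enum CalendarUnit { SECOND, MINUTE, HOUR, DAY, MONTH, YEAR }

class ClosestIntervalSketch {
    // Pick the coarsest indexed interval that is still no coarser than the
    // requested one; null means the star-tree cannot serve this histogram.
    static CalendarUnit findClosestValidInterval(List<CalendarUnit> sortedIndexed, CalendarUnit requested) {
        CalendarUnit closest = null;
        for (CalendarUnit unit : sortedIndexed) { // iterate fine -> coarse
            if (unit.compareTo(requested) <= 0) {
                closest = unit;                   // remember the coarsest match so far
            } else {
                break;                            // everything after is too coarse
            }
        }
        return closest;
    }

    public static void main(String[] args) {
        List<CalendarUnit> indexed = List.of(CalendarUnit.DAY, CalendarUnit.MONTH);
        System.out.println(findClosestValidInterval(indexed, CalendarUnit.YEAR)); // MONTH
        System.out.println(findClosestValidInterval(indexed, CalendarUnit.HOUR)); // null
    }
}
```

With day and month indexed, a yearly histogram resolves to month-granular star-tree buckets that the aggregator re-rounds to years, while an hourly histogram gets `null` and falls back to the default leaf collector.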
org.opensearch.search.startree.StarTreeQueryContext; import java.io.IOException; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.stream.Collectors; @@ -74,10 +79,16 @@ public static StarTreeQueryContext getStarTreeQueryContext(SearchContext context ); for (AggregatorFactory aggregatorFactory : context.aggregations().factories().getFactories()) { - MetricStat metricStat = validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory); - if (metricStat == null) { - return null; + // first check whether the aggregation is a metric aggregation + if (validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory)) { + continue; + } + + // if not a metric aggregation, check for an applicable date histogram shape + if (validateDateHistogramSupport(compositeMappedFieldType, aggregatorFactory)) { + continue; } + return null; } // need to cache star tree values only for multiple aggregations @@ -99,64 +110,85 @@ private static StarTreeQueryContext tryCreateStarTreeQueryContext( Map<String, Long> queryMap; if (queryBuilder == null || queryBuilder instanceof MatchAllQueryBuilder) { queryMap = null; - } else if (queryBuilder instanceof TermQueryBuilder) { + } else if (queryBuilder instanceof TermQueryBuilder termQueryBuilder) { // TODO: Add support for keyword fields - if (compositeFieldType.getDimensions().stream().anyMatch(d -> d.getDocValuesType() != DocValuesType.SORTED_NUMERIC)) { - // return null for non-numeric fields - return null; - } - - List<String> supportedDimensions = compositeFieldType.getDimensions() + Dimension matchedDimension = compositeFieldType.getDimensions() .stream() - .map(Dimension::getField) - .collect(Collectors.toList()); - queryMap = getStarTreePredicates(queryBuilder, supportedDimensions); - if (queryMap == null) { + .filter(d -> (d.getField().equals(termQueryBuilder.fieldName()) && d.getDocValuesType() == DocValuesType.SORTED_NUMERIC)) + .findFirst() + .orElse(null); + if (matchedDimension == null) { return null; } + queryMap = Map.of(termQueryBuilder.fieldName(), Long.parseLong(termQueryBuilder.value().toString())); } else { return null; } return new StarTreeQueryContext(compositeIndexFieldInfo, queryMap, cacheStarTreeValuesSize); } - /** - * Parse query body to star-tree predicates - * @param queryBuilder to match star-tree supported query shape - * @return predicates to match - */ - private static Map<String, Long> getStarTreePredicates(QueryBuilder queryBuilder, List<String> supportedDimensions) { - TermQueryBuilder tq = (TermQueryBuilder) queryBuilder; - String field = tq.fieldName(); - if (!supportedDimensions.contains(field)) { - return null; - } - long inputQueryVal = Long.parseLong(tq.value().toString()); - - // Create a map with the field and the value - Map<String, Long> predicateMap = new HashMap<>(); - predicateMap.put(field, inputQueryVal); - return predicateMap; - } - - private static MetricStat validateStarTreeMetricSupport( + private static boolean validateStarTreeMetricSupport( CompositeDataCubeFieldType compositeIndexFieldInfo, AggregatorFactory aggregatorFactory ) { - if (aggregatorFactory instanceof MetricAggregatorFactory && aggregatorFactory.getSubFactories().getFactories().length == 0) { + if (aggregatorFactory instanceof MetricAggregatorFactory metricAggregatorFactory + && metricAggregatorFactory.getSubFactories().getFactories().length == 0) { String field; Map<String, List<MetricStat>> supportedMetrics = compositeIndexFieldInfo.getMetrics() .stream()
.collect(Collectors.toMap(Metric::getField, Metric::getMetrics)); - MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); - field = ((MetricAggregatorFactory) aggregatorFactory).getField(); + MetricStat metricStat = metricAggregatorFactory.getMetricStat(); + field = metricAggregatorFactory.getField(); + + return supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat); + } + return false; + } + + private static boolean validateDateHistogramSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (!(aggregatorFactory instanceof DateHistogramAggregatorFactory dateHistogramAggregatorFactory) + || aggregatorFactory.getSubFactories().getFactories().length < 1) { + return false; + } + + // Find the DateDimension in the dimensions list + DateDimension starTreeDateDimension = null; + for (Dimension dimension : compositeIndexFieldInfo.getDimensions()) { + if (dimension instanceof DateDimension) { + starTreeDateDimension = (DateDimension) dimension; + break; + } + } + + // If no DateDimension is found, validation fails + if (starTreeDateDimension == null) { + return false; + } + + // Ensure the rounding is not null + if (dateHistogramAggregatorFactory.getRounding() == null) { + return false; + } + + // Find the closest valid interval in the DateTimeUnitRounding class associated with star tree + DateTimeUnitRounding rounding = starTreeDateDimension.findClosestValidInterval( + new DateTimeUnitAdapter(dateHistogramAggregatorFactory.getRounding()) + ); + if (rounding == null) { + return false; + } - if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { - return metricStat; + // Validate all sub-factories + for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { + if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { + return false; } } - return null; + return true; } public static CompositeIndexFieldInfo getSupportedStarTree(SearchContext context) { @@ -222,11 +254,37 @@ public static LeafBucketCollector getStarTreeLeafCollector( // Call the final consumer after processing all entries finalConsumer.run(); - // Return a LeafBucketCollector that terminates collection - return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) { + // Terminate after pre-computing aggregation + throw new CollectionTerminatedException(); + } + + public static StarTreeBucketCollector getStarTreeBucketMetricCollector( + CompositeIndexFieldInfo starTree, + String metric, + ValuesSource.Numeric valuesSource, + StarTreeBucketCollector parentCollector, + Consumer growArrays, + BiConsumer updateBucket + ) throws IOException { + assert parentCollector != null; + return new StarTreeBucketCollector(parentCollector) { + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(), + metric + ); + SortedNumericStarTreeValuesIterator metricValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(metricName); + @Override - public void collect(int doc, long bucket) { - throw new CollectionTerminatedException(); + public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOException { + growArrays.accept(bucket); + // Advance the valuesIterator to the current bit + if (!metricValuesIterator.advanceExact(starTreeEntryBit)) { + 
return; // Skip if no entries for this document + } + long metricValue = metricValuesIterator.nextValue(); + updateBucket.accept(bucket, metricValue); } }; } @@ -240,7 +298,7 @@ public static FixedBitSet getStarTreeFilteredValues(SearchContext context, LeafR throws IOException { FixedBitSet result = context.getStarTreeQueryContext().getStarTreeValues(ctx); if (result == null) { - result = StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap()); + result = StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap(), Set.of()); context.getStarTreeQueryContext().setStarTreeValues(ctx, result); } return result; diff --git a/server/src/main/java/org/opensearch/search/aggregations/StarTreeBucketCollector.java b/server/src/main/java/org/opensearch/search/aggregations/StarTreeBucketCollector.java new file mode 100644 index 0000000000000..e994b65442a49 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/StarTreeBucketCollector.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations; + +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Collector for star tree aggregation + * This abstract class exposes utilities to help avoid traversing star-tree multiple times and + * collect relevant metrics across nested aggregations in a single traversal + * @opensearch.internal + */ +@ExperimentalApi +public abstract class StarTreeBucketCollector { + + protected final StarTreeValues starTreeValues; + protected final FixedBitSet matchingDocsBitSet; + protected final List subCollectors = new ArrayList<>(); + + public StarTreeBucketCollector(StarTreeValues starTreeValues, FixedBitSet matchingDocsBitSet) throws IOException { + this.starTreeValues = starTreeValues; + this.matchingDocsBitSet = matchingDocsBitSet; + this.setSubCollectors(); + } + + public StarTreeBucketCollector(StarTreeBucketCollector parent) throws IOException { + this.starTreeValues = parent.getStarTreeValues(); + this.matchingDocsBitSet = parent.getMatchingDocsBitSet(); + this.setSubCollectors(); + } + + /** + * Sets the sub-collectors to track nested aggregators + */ + public void setSubCollectors() throws IOException {}; + + /** + * Returns a list of sub-collectors to track nested aggregators + */ + public List getSubCollectors() { + return subCollectors; + } + + /** + * Returns the tree values to iterate + */ + public StarTreeValues getStarTreeValues() { + return starTreeValues; + } + + /** + * Returns the matching docs bitset to iterate upon the star-tree values based on search query + */ + public FixedBitSet getMatchingDocsBitSet() { + return matchingDocsBitSet; + } + + /** + * Collects the star tree entry and bucket ordinal to update + * The method implementation should identify the metrics to collect from that star-tree entry to the specified bucket + */ + public abstract void collectStarTreeEntry(int starTreeEntry, long bucket) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/StarTreePreComputeCollector.java 
b/server/src/main/java/org/opensearch/search/aggregations/StarTreePreComputeCollector.java new file mode 100644 index 0000000000000..c2f2017997c4d --- /dev/null +++ b/server/src/main/java/org/opensearch/search/aggregations/StarTreePreComputeCollector.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations; + +import org.apache.lucene.index.LeafReaderContext; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; + +import java.io.IOException; + +/** + * This interface is used to pre-compute the star tree bucket collector for each segment/leaf. + * It is utilized by parent aggregation to retrieve a StarTreeBucketCollector which can be used to + * pre-compute the associated aggregation along with its parent pre-computation using star-tree + * + * @opensearch.internal + */ +public interface StarTreePreComputeCollector { + /** + * Get the star tree bucket collector for the specified segment/leaf + */ + StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parentCollector + ) throws IOException; +} diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java index 5420d8c7f6dbf..f075d67b0f48d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/BucketsAggregator.java @@ -43,6 +43,7 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.StarTreeBucketCollector; import org.opensearch.search.aggregations.bucket.global.GlobalAggregator; import org.opensearch.search.aggregations.bucket.terms.LongKeyedBucketOrds; import org.opensearch.search.aggregations.support.AggregationPath; @@ -129,6 +130,20 @@ public final void collectExistingBucket(LeafBucketCollector subCollector, int do subCollector.collect(doc, bucketOrd); } + /** + * Utility method to collect doc count in the given bucket (identified by the bucket ordinal) + * After collecting doc count, invoke collectStarTreeEntry() for sub-collectors to update their relevant buckets + */ + public final void collectStarTreeBucket(StarTreeBucketCollector collector, long docCount, long bucketOrd, int entryBit) + throws IOException { + if (docCounts.increment(bucketOrd, docCount) == docCount) { + multiBucketConsumer.accept(0); + } + for (StarTreeBucketCollector subCollector : collector.getSubCollectors()) { + subCollector.collectStarTreeEntry(entryBit, bucketOrd); + } + } + /** * This only tidies up doc counts. Call {@link MergingBucketsDeferringCollector#mergeBuckets(long[])} to merge the actual * ordinals and doc ID deltas. 
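The `collectStarTreeBucket` utility above increments the bucket's doc count and then forwards the same star-tree entry to every sub-collector, which is how a bucket aggregation and its metric sub-aggregations share a single star-tree traversal. A hedged sketch of a parent collector built on the new API (the bucket-ordinal computation is elided; the real date-histogram collector derives it from the rounded dimension value):

```java
import java.io.IOException;

import org.opensearch.search.aggregations.StarTreeBucketCollector;

// Sketch of a parent collector: resolve a bucket ordinal for the entry, then
// hand the same entry to every sub-collector so the metric children update
// the same bucket during the single star-tree traversal.
class NestedCollectorSketch extends StarTreeBucketCollector {
    NestedCollectorSketch(StarTreeBucketCollector parent) throws IOException {
        super(parent); // shares the parent's StarTreeValues and matching-docs bitset
    }

    @Override
    public void collectStarTreeEntry(int entryBit, long owningBucketOrd) throws IOException {
        // Illustrative only: a real bucket aggregator derives bucketOrd from the
        // entry's dimension value (e.g. bucketOrds.add(owningBucketOrd, roundedValue)).
        long bucketOrd = owningBucketOrd;
        for (StarTreeBucketCollector sub : getSubCollectors()) {
            sub.collectStarTreeEntry(entryBit, bucketOrd);
        }
    }
}
```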
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 96a49bc3fd5f6..23fbacc979224 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -34,11 +34,23 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; +import org.apache.lucene.util.FixedBitSet; import org.opensearch.common.Nullable; import org.opensearch.common.Rounding; import org.opensearch.common.lease.Releasables; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; import org.opensearch.search.aggregations.AggregatorFactories; @@ -47,6 +59,8 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.bucket.BucketsAggregator; import org.opensearch.search.aggregations.bucket.filterrewrite.DateHistogramAggregatorBridge; import org.opensearch.search.aggregations.bucket.filterrewrite.FilterRewriteOptimizationContext; @@ -54,13 +68,17 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeFilter; import java.io.IOException; import java.util.Collections; +import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; +import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; import static org.opensearch.search.aggregations.bucket.filterrewrite.DateHistogramAggregatorBridge.segmentMatchAll; /** @@ -71,7 +89,7 @@ * * @opensearch.internal */ -class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAggregator { +class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAggregator, StarTreePreComputeCollector { private final 
ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; @@ -85,8 +103,11 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg private final LongBounds extendedBounds; private final LongBounds hardBounds; private final LongKeyedBucketOrds bucketOrds; + private final String starTreeDateDimension; + private boolean starTreeDateRoundingRequired = true; private final FilterRewriteOptimizationContext filterRewriteOptimizationContext; + public final String STARTREE_TIMESTAMP_FIELD = "@timestamp"; DateHistogramAggregator( String name, @@ -151,6 +172,7 @@ protected Function bucketOrdProducer() { } }; filterRewriteOptimizationContext = new FilterRewriteOptimizationContext(bridge, parent, subAggregators.length, context); + this.starTreeDateDimension = (context.getStarTreeQueryContext() != null) ? fetchStarTreeCalendarUnit() : null; } @Override @@ -171,6 +193,13 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol if (optimized) throw new CollectionTerminatedException(); SortedNumericDocValues values = valuesSource.longValues(ctx); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + if (supportedStarTree != null) { + if (preComputeWithStarTree(ctx, supportedStarTree) == true) { + throw new CollectionTerminatedException(); + } + } + return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { @@ -201,6 +230,88 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } + private String fetchStarTreeCalendarUnit() { + if (this.rounding.unit() == null) { + return null; + } + + CompositeDataCubeFieldType compositeMappedFieldType = (CompositeDataCubeFieldType) context.mapperService() + .getCompositeFieldTypes() + .iterator() + .next(); + DateDimension starTreeDateDimension = (DateDimension) compositeMappedFieldType.getDimensions() + .stream() + .filter(dim -> dim.getField().equals(STARTREE_TIMESTAMP_FIELD)) + .findFirst() // Get the first matching time dimension + .orElseThrow(() -> new AssertionError(String.format(Locale.ROOT, "Date dimension '%s' not found", STARTREE_TIMESTAMP_FIELD))); + + DateTimeUnitAdapter dateTimeUnitRounding = new DateTimeUnitAdapter(this.rounding.unit()); + DateTimeUnitRounding rounding = starTreeDateDimension.findClosestValidInterval(dateTimeUnitRounding); + String dimensionName = STARTREE_TIMESTAMP_FIELD + "_" + rounding.shortName(); + if (rounding.shortName().equals(this.rounding.unit().shortName())) { + this.starTreeDateRoundingRequired = false; + } + return dimensionName; + } + + @Override + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parentCollector + ) throws IOException { + assert parentCollector == null; + StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); + return new StarTreeBucketCollector( + starTreeValues, + StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap(), Set.of(starTreeDateDimension)) + ) { + @Override + public void setSubCollectors() throws IOException { + for (Aggregator aggregator : subAggregators) { + this.subCollectors.add(((StarTreePreComputeCollector) aggregator).getStarTreeBucketCollector(ctx, starTree, this)); + } + } + + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + 
.getDimensionValuesIterator(starTreeDateDimension); + + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + "_doc_count", + MetricStat.DOC_COUNT.getTypeName() + ); + SortedNumericStarTreeValuesIterator docCountsIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(metricName); + + @Override + public void collectStarTreeEntry(int starTreeEntry, long owningBucketOrd) throws IOException { + if (!valuesIterator.advanceExact(starTreeEntry)) { + return; + } + + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + long dimensionValue = starTreeDateRoundingRequired + ? preparedRounding.round(valuesIterator.nextValue()) + : valuesIterator.nextValue(); + + if (docCountsIterator.advanceExact(starTreeEntry)) { + long metricValue = docCountsIterator.nextValue(); + + long bucketOrd = bucketOrds.add(owningBucketOrd, dimensionValue); + if (bucketOrd < 0) { + bucketOrd = -1 - bucketOrd; + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } else { + grow(bucketOrd + 1); + collectStarTreeBucket(this, metricValue, bucketOrd, starTreeEntry); + } + } + } + } + }; + } + @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { return buildAggregationsForVariableBuckets(owningBucketOrds, bucketOrds, (bucketValue, docCount, subAggregationResults) -> { @@ -268,4 +379,20 @@ public double bucketSize(long bucket, Rounding.DateTimeUnit unitSize) { return 1.0; } } + + private boolean preComputeWithStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { + StarTreeBucketCollector starTreeBucketCollector = getStarTreeBucketCollector(ctx, starTree, null); + FixedBitSet matchingDocsBitSet = starTreeBucketCollector.getMatchingDocsBitSet(); + + int numBits = matchingDocsBitSet.length(); + + if (numBits > 0) { + for (int bit = matchingDocsBitSet.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? 
matchingDocsBitSet.nextSetBit(bit + 1) : DocIdSetIterator.NO_MORE_DOCS) { starTreeBucketCollector.collectStarTreeEntry(bit, 0); } } return true; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 807ec1ab4e4b7..55da3189a9fb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -153,4 +153,8 @@ protected Aggregator createUnmapped(SearchContext searchContext, Aggregator pare protected boolean supportsConcurrentSegmentSearch() { return true; } + + public Rounding.DateTimeUnit getRounding() { + return this.rounding.unit(); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java index 2970c5ca851e7..c9f5bb7f3534b 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java @@ -53,6 +53,8 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; @@ -68,7 +70,7 @@ * * @opensearch.internal */ -class AvgAggregator extends NumericMetricsAggregator.SingleValue { +class AvgAggregator extends NumericMetricsAggregator.SingleValue implements StarTreePreComputeCollector { final ValuesSource.Numeric valuesSource; @@ -108,6 +110,11 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc } CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); if (supportedStarTree != null) { + if (parent != null && subAggregators.length == 0) { + // If this is a child aggregator, then the parent will trigger star-tree pre-computation. + // Returning NO_OP_COLLECTOR explicitly because getLeafCollector() is invoked starting from the innermost aggregators + return LeafBucketCollector.NO_OP_COLLECTOR; + } return getStarTreeLeafCollector(ctx, sub, supportedStarTree); } return getDefaultLeafCollector(ctx, sub); @@ -164,7 +171,7 @@ public LeafBucketCollector getStarTreeLeafCollector(LeafReaderContext ctx, LeafB MetricStat.VALUE_COUNT.getTypeName() ); - final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), 0); + final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), compensations.get(0)); SortedNumericStarTreeValuesIterator sumValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues .getMetricValuesIterator(sumMetricName); SortedNumericStarTreeValuesIterator countValueIterator = (SortedNumericStarTreeValuesIterator) starTreeValues @@ -192,6 +199,7 @@ public LeafBucketCollector getStarTreeLeafCollector(LeafReaderContext ctx, LeafB } sums.set(0, kahanSummation.value()); + compensations.set(0, kahanSummation.delta()); return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) { @Override public void collect(int doc, long bucket) { @@ -226,4 +234,47 @@ public void doClose() { Releasables.close(counts, sums, compensations); } + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parentCollector + ) throws IOException { + assert parentCollector != null; + return new StarTreeBucketCollector(parentCollector) { + String sumMetricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(), + MetricStat.SUM.getTypeName() + ); + String valueCountMetricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(), + MetricStat.VALUE_COUNT.getTypeName() + ); + SortedNumericStarTreeValuesIterator sumMetricValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(sumMetricName); + SortedNumericStarTreeValuesIterator valueCountMetricValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(valueCountMetricName); + + final CompensatedSum kahanSummation = new CompensatedSum(0, 0); + + @Override + public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOException { + counts = context.bigArrays().grow(counts, bucket + 1); + sums = context.bigArrays().grow(sums, bucket + 1); + compensations = context.bigArrays().grow(compensations, bucket + 1); + // Advance the valuesIterator to the current bit + if (!sumMetricValuesIterator.advanceExact(starTreeEntryBit) + || !valueCountMetricValuesIterator.advanceExact(starTreeEntryBit)) { + return; // Skip if no entries for this document + } + kahanSummation.reset(sums.get(bucket), compensations.get(bucket)); + kahanSummation.add(NumericUtils.sortableLongToDouble(sumMetricValuesIterator.nextValue())); + + sums.set(bucket, kahanSummation.value()); + compensations.set(bucket, kahanSummation.delta()); + counts.increment(bucket, valueCountMetricValuesIterator.nextValue()); + } + }; + } }
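Note the `CompensatedSum(sums.get(0), compensations.get(0))` change above (mirrored in `SumAggregator` below): the Kahan compensation term is now carried across segments instead of being reset to zero. A standalone sketch of why that delta matters (this mirrors what `CompensatedSum` does conceptually; it is not the OpenSearch class itself):

```java
// Sketch: a plain running sum loses low-order bits, while Kahan summation
// carries them in a delta term. Resetting the delta to 0 between segments
// (the old code) reintroduces exactly that error.
class KahanSketch {
    double value = 0, delta = 0;            // mirrors CompensatedSum(value, delta)

    void add(double v) {
        double corrected = v - delta;       // re-apply previously lost low-order bits
        double next = value + corrected;    // may round away part of `corrected`
        delta = (next - value) - corrected; // capture what rounding dropped
        value = next;
    }

    public static void main(String[] args) {
        KahanSketch s = new KahanSketch();
        double naive = 0;
        for (int i = 0; i < 10_000_000; i++) {
            s.add(0.1);
            naive += 0.1;
        }
        System.out.println("kahan = " + s.value); // ~1000000.0
        System.out.println("naive = " + naive);   // drifts in the low-order digits
    }
}
```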
diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index 257109bca54bb..49aaf5e0670bb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -52,6 +52,8 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; @@ -69,7 +71,7 @@ * * @opensearch.internal */ -class MaxAggregator extends NumericMetricsAggregator.SingleValue { +class MaxAggregator extends NumericMetricsAggregator.SingleValue implements StarTreePreComputeCollector { final ValuesSource.Numeric valuesSource; final DocValueFormat formatter; @@ -130,6 +132,11 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); if (supportedStarTree != null) { + if (parent != null && subAggregators.length == 0) { + // If this is a child aggregator, then the parent will trigger star-tree pre-computation. + // Returning NO_OP_COLLECTOR explicitly because getLeafCollector() is invoked starting from the innermost aggregators + return LeafBucketCollector.NO_OP_COLLECTOR; + } return getStarTreeCollector(ctx, sub, supportedStarTree); } return getDefaultLeafCollector(ctx, sub); @@ -249,4 +256,27 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue }); return result[0] != null ? converter.apply(result[0]) : null; } + + /** + * The parent aggregator invokes this method to get a StarTreeBucketCollector, + * which exposes collectStarTreeEntry() to be evaluated on filtered star tree entries + */ + public StarTreeBucketCollector getStarTreeBucketCollector( + LeafReaderContext ctx, + CompositeIndexFieldInfo starTree, + StarTreeBucketCollector parentCollector + ) throws IOException { + return StarTreeQueryHelper.getStarTreeBucketMetricCollector( + starTree, + MetricStat.MAX.getTypeName(), + valuesSource, + parentCollector, + (bucket) -> { + long from = maxes.size(); + maxes = context.bigArrays().grow(maxes, bucket + 1); + maxes.fill(from, maxes.size(), Double.NEGATIVE_INFINITY); + }, + (bucket, metricValue) -> maxes.set(bucket, Math.max(maxes.get(bucket), (NumericUtils.sortableLongToDouble(metricValue)))) + ); + } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index a9f20bdeb5fd5..febb227dd4e2a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -52,6 +52,8 @@ import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.LeafBucketCollectorBase; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.StarTreePreComputeCollector; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import
@@ -68,7 +70,7 @@
  *
  * @opensearch.internal
  */
-class MinAggregator extends NumericMetricsAggregator.SingleValue {
+class MinAggregator extends NumericMetricsAggregator.SingleValue implements StarTreePreComputeCollector {
     private static final int MAX_BKD_LOOKUPS = 1024;
     final ValuesSource.Numeric valuesSource;
@@ -129,6 +131,11 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc
         CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context);
         if (supportedStarTree != null) {
+            if (parent != null && subAggregators.length == 0) {
+                // If this is a child aggregator, then the parent will trigger star-tree pre-computation.
+                // Returning NO_OP_COLLECTOR explicitly because getLeafCollector() is invoked starting from the innermost aggregators
+                return LeafBucketCollector.NO_OP_COLLECTOR;
+            }
             return getStarTreeCollector(ctx, sub, supportedStarTree);
         }
         return getDefaultLeafCollector(ctx, sub);
@@ -243,4 +250,27 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue
         } catch (CollectionTerminatedException e) {}
         return result[0];
     }
+
+    /**
+     * The parent aggregator invokes this method to get a StarTreeBucketCollector,
+     * which exposes collectStarTreeEntry() to be evaluated on filtered star tree entries
+     */
+    public StarTreeBucketCollector getStarTreeBucketCollector(
+        LeafReaderContext ctx,
+        CompositeIndexFieldInfo starTree,
+        StarTreeBucketCollector parentCollector
+    ) throws IOException {
+        return StarTreeQueryHelper.getStarTreeBucketMetricCollector(
+            starTree,
+            MetricStat.MIN.getTypeName(),
+            valuesSource,
+            parentCollector,
+            (bucket) -> {
+                long from = mins.size();
+                mins = context.bigArrays().grow(mins, bucket + 1);
+                mins.fill(from, mins.size(), Double.POSITIVE_INFINITY);
+            },
+            (bucket, metricValue) -> mins.set(bucket, Math.min(mins.get(bucket), NumericUtils.sortableLongToDouble(metricValue)))
+        );
+    }
 }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java
index 3d237a94c5699..7376cc1e93b41 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java
@@ -46,6 +46,8 @@
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.LeafBucketCollector;
 import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+import org.opensearch.search.aggregations.StarTreeBucketCollector;
+import org.opensearch.search.aggregations.StarTreePreComputeCollector;
 import org.opensearch.search.aggregations.support.ValuesSource;
 import org.opensearch.search.aggregations.support.ValuesSourceConfig;
 import org.opensearch.search.internal.SearchContext;
@@ -60,7 +62,7 @@
  *
  * @opensearch.internal
  */
-public class SumAggregator extends NumericMetricsAggregator.SingleValue {
+public class SumAggregator extends NumericMetricsAggregator.SingleValue implements StarTreePreComputeCollector {
     private final ValuesSource.Numeric valuesSource;
     private final DocValueFormat format;
@@ -98,6 +100,11 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc
         CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context);
         if (supportedStarTree != null) {
+            if (parent != null && subAggregators.length == 0) {
+                // If this is a child aggregator, then the parent will trigger star-tree pre-computation.
+                // Returning NO_OP_COLLECTOR explicitly because getLeafCollector() is invoked starting from the innermost aggregators
+                return LeafBucketCollector.NO_OP_COLLECTOR;
+            }
             return getStarTreeCollector(ctx, sub, supportedStarTree);
         }
         return getDefaultLeafCollector(ctx, sub);
@@ -135,7 +142,8 @@ public void collect(int doc, long bucket) throws IOException {
     public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree)
         throws IOException {
-        final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), 0);
+        final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), compensations.get(0));
+
         return StarTreeQueryHelper.getStarTreeLeafCollector(
             context,
             valuesSource,
@@ -144,7 +152,38 @@ public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucke
             starTree,
             MetricStat.SUM.getTypeName(),
             value -> kahanSummation.add(NumericUtils.sortableLongToDouble(value)),
-            () -> sums.set(0, kahanSummation.value())
+            () -> {
+                sums.set(0, kahanSummation.value());
+                compensations.set(0, kahanSummation.delta());
+            }
         );
     }
+
+    /**
+     * The parent aggregator invokes this method to get a StarTreeBucketCollector,
+     * which exposes collectStarTreeEntry() to be evaluated on filtered star tree entries
+     */
+    public StarTreeBucketCollector getStarTreeBucketCollector(
+        LeafReaderContext ctx,
+        CompositeIndexFieldInfo starTree,
+        StarTreeBucketCollector parentCollector
+    ) throws IOException {
+        final CompensatedSum kahanSummation = new CompensatedSum(0, 0);
+        return StarTreeQueryHelper.getStarTreeBucketMetricCollector(
+            starTree,
+            MetricStat.SUM.getTypeName(),
+            valuesSource,
+            parentCollector,
+            (bucket) -> {
+                sums = context.bigArrays().grow(sums, bucket + 1);
+                compensations = context.bigArrays().grow(compensations, bucket + 1);
+            },
+            (bucket, metricValue) -> {
+                kahanSummation.reset(sums.get(bucket), compensations.get(bucket));
+                kahanSummation.add(NumericUtils.sortableLongToDouble(metricValue));
+                sums.set(bucket, kahanSummation.value());
+                compensations.set(bucket, kahanSummation.delta());
+            }
+        );
+    }
diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java
index a156ec49983fa..f6f4a8a56eddc 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java
@@ -46,6 +46,8 @@
 import org.opensearch.search.aggregations.InternalAggregation;
 import org.opensearch.search.aggregations.LeafBucketCollector;
 import org.opensearch.search.aggregations.LeafBucketCollectorBase;
+import org.opensearch.search.aggregations.StarTreeBucketCollector;
+import org.opensearch.search.aggregations.StarTreePreComputeCollector;
 import org.opensearch.search.aggregations.support.ValuesSource;
 import org.opensearch.search.aggregations.support.ValuesSourceConfig;
 import org.opensearch.search.internal.SearchContext;
@@ -63,7 +65,7 @@
  *
  * @opensearch.internal
  */
-public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
+public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue implements StarTreePreComputeCollector {
     final ValuesSource valuesSource;
@@ -96,6 +98,11 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc
         CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context);
         if (supportedStarTree != null) {
+            if (parent != null && subAggregators.length == 0) {
+                // If this is a child aggregator, then the parent will trigger star-tree pre-computation.
+                // Returning NO_OP_COLLECTOR explicitly because getLeafCollector() is invoked starting from the innermost aggregators
+                return LeafBucketCollector.NO_OP_COLLECTOR;
+            }
             return getStarTreeCollector(ctx, sub, supportedStarTree);
         }
@@ -180,4 +187,22 @@ public void doClose() {
         Releasables.close(counts);
     }
+
+    /**
+     * The parent aggregator invokes this method to get a StarTreeBucketCollector,
+     * which exposes collectStarTreeEntry() to be evaluated on filtered star tree entries
+     */
+    public StarTreeBucketCollector getStarTreeBucketCollector(
+        LeafReaderContext ctx,
+        CompositeIndexFieldInfo starTree,
+        StarTreeBucketCollector parentCollector
+    ) throws IOException {
+        return StarTreeQueryHelper.getStarTreeBucketMetricCollector(
+            starTree,
+            MetricStat.VALUE_COUNT.getTypeName(),
+            (ValuesSource.Numeric) valuesSource,
+            parentCollector,
+            (bucket) -> counts = context.bigArrays().grow(counts, bucket + 1),
+            (bucket, metricValue) -> counts.increment(bucket, metricValue)
+        );
+    }
 }
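The StarTreeFilter change that follows makes the tree traversal aware of group-by dimensions, not just filter predicates. The key rule: a star node pre-aggregates across every value of its dimension, so it may only be taken as a shortcut when that dimension is neither filtered on nor grouped on. A hedged sketch of that decision with hypothetical helper names (the real logic lives in traverseStarTree):

import java.util.Set;

final class StarNodeShortcutSketch {
    // Illustrative only: mirrors the condition the patch adds around getChildStarNode().
    static boolean canTakeStarNodeShortcut(String childDimension, Set<String> remainingPredicateColumns, Set<String> remainingGroupByColumns) {
        // Filtering needs the concrete child node for the predicate value, and grouping needs
        // one bucket per distinct value, so either use of the dimension blocks the shortcut.
        return !remainingPredicateColumns.contains(childDimension) && !remainingGroupByColumns.contains(childDimension);
    }
}

diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java
index f7fa210691678..ea2c43a40f330 100644
--- a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java
+++ b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java
@@ -47,9 +47,13 @@ public class StarTreeFilter {
      * First go over the star tree and try to match as many dimensions as possible
      * For the remaining columns, use star-tree doc values to match them
      */
-    public static FixedBitSet getStarTreeResult(StarTreeValues starTreeValues, Map<String, Long> predicateEvaluators) throws IOException {
+    public static FixedBitSet getStarTreeResult(
+        StarTreeValues starTreeValues,
+        Map<String, Long> predicateEvaluators,
+        Set<String> groupByField
+    ) throws IOException {
         Map<String, Long> queryMap = predicateEvaluators != null ? predicateEvaluators : Collections.emptyMap();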
-        StarTreeResult starTreeResult = traverseStarTree(starTreeValues, queryMap);
+        StarTreeResult starTreeResult = traverseStarTree(starTreeValues, queryMap, groupByField);
         // Initialize FixedBitSet with size maxMatchedDoc + 1
         FixedBitSet bitSet = new FixedBitSet(starTreeResult.maxMatchedDoc + 1);
@@ -113,7 +117,8 @@ public static FixedBitSet getStarTreeResult(StarTreeValues starTreeValues, Map<
-    private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Map<String, Long> queryMap) throws IOException {
+    private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Map<String, Long> queryMap, Set<String> groupByField)
+        throws IOException {
         DocIdSetBuilder docsWithField = new DocIdSetBuilder(starTreeValues.getStarTreeDocumentCount());
         DocIdSetBuilder.BulkAdder adder;
         Set<String> globalRemainingPredicateColumns = null;
@@ -129,6 +134,7 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma
         queue.add(starTree);
         int currentDimensionId = -1;
         Set<String> remainingPredicateColumns = new HashSet<>(queryMap.keySet());
+        Set<String> remainingGroupByColumns = new HashSet<>(groupByField);
         int matchedDocsCountInStarTree = 0;
         int maxDocNum = -1;
         StarTreeNode starTreeNode;
@@ -139,13 +145,14 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma
             if (dimensionId > currentDimensionId) {
                 String dimension = dimensionNames.get(dimensionId);
                 remainingPredicateColumns.remove(dimension);
+                remainingGroupByColumns.remove(dimension);
                 if (foundLeafNode && globalRemainingPredicateColumns == null) {
                     globalRemainingPredicateColumns = new HashSet<>(remainingPredicateColumns);
                 }
                 currentDimensionId = dimensionId;
             }
-            if (remainingPredicateColumns.isEmpty()) {
+            if (remainingPredicateColumns.isEmpty() && remainingGroupByColumns.isEmpty()) {
                 int docId = starTreeNode.getAggregatedDocId();
                 docIds.add(docId);
                 matchedDocsCountInStarTree++;
@@ -164,7 +171,8 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma
             String childDimension = dimensionNames.get(dimensionId + 1);
             StarTreeNode starNode = null;
-            if (globalRemainingPredicateColumns == null || !globalRemainingPredicateColumns.contains(childDimension)) {
+            if (((globalRemainingPredicateColumns == null || !globalRemainingPredicateColumns.contains(childDimension))
+                && !remainingGroupByColumns.contains(childDimension))) {
                 starNode = starTreeNode.getChildStarNode();
             }
diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
index 3b32e9e4ac6b7..1beec828e849e 100644
--- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
+++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
@@ -25,6 +25,12 @@
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.indices.IndicesService;
 import org.opensearch.search.aggregations.AggregationBuilders;
+import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
+import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder;
+import org.opensearch.search.aggregations.metrics.MedianAbsoluteDeviationAggregationBuilder;
+import org.opensearch.search.aggregations.metrics.SumAggregationBuilder;
+import org.opensearch.search.aggregations.startree.DateHistogramAggregatorTests;
 import org.opensearch.search.aggregations.startree.StarTreeFilterTests;
 import org.opensearch.search.builder.SearchSourceBuilder;
 import
org.opensearch.search.internal.AliasFilter; @@ -37,12 +43,27 @@ import java.io.IOException; import java.util.Map; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.medianAbsoluteDeviation; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; +/** + * Tests for validating query shapes which can be resolved using star-tree index + * For valid resolvable (with star-tree) cases, StarTreeQueryContext is created and populated with the SearchContext + * For non-resolvable (with star-tree) cases, StarTreeQueryContext is null + */ public class SearchServiceStarTreeTests extends OpenSearchSingleNodeTestCase { - public void testParseQueryToOriginalOrStarTreeQuery() throws IOException { + private static final String TIMESTAMP_FIELD = "@timestamp"; + private static final String FIELD_NAME = "status"; + + /** + * Test query parsing for non-nested metric aggregations, with/without numeric term query + */ + public void testQueryParsingForMetricAggregations() throws IOException { FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); setStarTreeIndexSetting("true"); @@ -81,10 +102,8 @@ public void testParseQueryToOriginalOrStarTreeQuery() throws IOException { sourceBuilder = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); assertStarTreeContext(request, sourceBuilder, null, -1); - // Case 3: MatchAllQuery and aggregations present, should use star tree - sourceBuilder = new SearchSourceBuilder().size(0) - .query(new MatchAllQueryBuilder()) - .aggregation(AggregationBuilders.max("test").field("field")); + // Case 3: MatchAllQuery and metric aggregations present, should use star tree + sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(max("test").field("field")); CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( "startree", CompositeMappedFieldType.CompositeFieldType.STAR_TREE @@ -92,36 +111,198 @@ public void testParseQueryToOriginalOrStarTreeQuery() throws IOException { Map expectedQueryMap = null; assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); - // Case 4: MatchAllQuery and aggregations present, but postFilter specified, should not use star tree + // Case 4: MatchAllQuery and metric aggregations present, but postFilter specified, should not use star tree sourceBuilder = new SearchSourceBuilder().size(0) .query(new MatchAllQueryBuilder()) - .aggregation(AggregationBuilders.max("test").field("field")) + .aggregation(max("test").field("field")) .postFilter(new MatchAllQueryBuilder()); assertStarTreeContext(request, sourceBuilder, null, -1); - // Case 5: TermQuery and single aggregation, should use star tree, but not initialize query cache - sourceBuilder = new SearchSourceBuilder().size(0) - .query(new TermQueryBuilder("sndv", 1)) - .aggregation(AggregationBuilders.max("test").field("field")); + // Case 5: TermQuery and single metric aggregation, should use star tree, but not initialize query cache + sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder("sndv", 1)).aggregation(max("test").field("field")); expectedQueryMap = Map.of("sndv", 1L); assertStarTreeContext(request, sourceBuilder, 
new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1);
-        // Case 6: TermQuery and multiple aggregations present, should use star tree & initialize cache
+        // Case 6: TermQuery and multiple metric aggregations present, should use star tree & initialize cache
         sourceBuilder = new SearchSourceBuilder().size(0)
             .query(new TermQueryBuilder("sndv", 1))
-            .aggregation(AggregationBuilders.max("test").field("field"))
+            .aggregation(max("test").field("field"))
             .aggregation(AggregationBuilders.sum("test2").field("field"));
         expectedQueryMap = Map.of("sndv", 1L);
         assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, 0), 0);
         // Case 7: No query, metric aggregations present, should use star tree
-        sourceBuilder = new SearchSourceBuilder().size(0).aggregation(AggregationBuilders.max("test").field("field"));
+        sourceBuilder = new SearchSourceBuilder().size(0).aggregation(max("test").field("field"));
         assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, null, -1), -1);
         setStarTreeIndexSetting(null);
     }
-    private void setStarTreeIndexSetting(String value) throws IOException {
+    /**
+     * Test query parsing for date histogram aggregations, with/without numeric term query
+     */
+    public void testQueryParsingForDateHistogramAggregations() throws IOException {
+        FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build());
+        setStarTreeIndexSetting("true");
+
+        Settings settings = Settings.builder()
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true)
+            .build();
+        CreateIndexRequestBuilder builder = client().admin()
+            .indices()
+            .prepareCreate("test")
+            .setSettings(settings)
+            .setMapping(DateHistogramAggregatorTests.getExpandedMapping(1, false));
+        createIndex("test", builder);
+
+        IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test"));
+        IndexShard indexShard = indexService.getShard(0);
+        ShardSearchRequest request = new ShardSearchRequest(
+            OriginalIndices.NONE,
+            new SearchRequest().allowPartialSearchResults(true),
+            indexShard.shardId(),
+            1,
+            new AliasFilter(null, Strings.EMPTY_ARRAY),
+            1.0f,
+            -1,
+            null,
+            null
+        );
+
+        MaxAggregationBuilder maxAggNoSub = max("max").field(FIELD_NAME);
+        SumAggregationBuilder sumAggNoSub = sum("sum").field(FIELD_NAME);
+        SumAggregationBuilder sumAggSub = sum("sum").field(FIELD_NAME).subAggregation(maxAggNoSub);
+        MedianAbsoluteDeviationAggregationBuilder medianAgg = medianAbsoluteDeviation("median").field(FIELD_NAME);
+
+        // Case 1: No query or aggregations, should not use star tree
+        SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 2: MatchAllQuery present but no aggregations, should not use star tree
+        sourceBuilder = new SearchSourceBuilder().query(new MatchAllQueryBuilder());
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 3: MatchAllQuery and a non-nested metric aggregation nested within a date-histogram aggregation, should use star tree
+        DateHistogramAggregationBuilder dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.DAY)
+            .subAggregation(maxAggNoSub);
+        sourceBuilder = new SearchSourceBuilder().size(0).query(new
MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder);
+        CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo(
+            "startree1",
+            CompositeMappedFieldType.CompositeFieldType.STAR_TREE
+        );
+        Map expectedQueryMap = null;
+        assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1);
+
+        // Case 4: MatchAllQuery and a nested metric aggregation nested within a date-histogram aggregation, should not use star tree
+        dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.DAY)
+            .subAggregation(sumAggSub);
+        sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder);
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 5: MatchAllQuery and a non-star-tree-supported aggregation nested within a date-histogram aggregation, should not use star tree
+        dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.DAY)
+            .subAggregation(medianAgg);
+        sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder);
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 6: NumericTermQuery and date-histogram aggregation present, should use star tree
+        dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.DAY)
+            .subAggregation(maxAggNoSub);
+        sourceBuilder = new SearchSourceBuilder().size(0)
+            .query(new TermQueryBuilder(FIELD_NAME, 1))
+            .aggregation(dateHistogramAggregationBuilder);
+        expectedQueryMap = Map.of(FIELD_NAME, 1L);
+        assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1);
+
+        // Case 7: Date histogram with a non-calendar interval: rounding is null for DateHistogramFactory - cannot use star-tree
+        dateHistogramAggregationBuilder = dateHistogram("non_cal").field(TIMESTAMP_FIELD)
+            .fixedInterval(DateHistogramInterval.DAY)
+            .subAggregation(maxAggNoSub);
+        sourceBuilder = new SearchSourceBuilder().size(0).aggregation(dateHistogramAggregationBuilder);
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 8: Date histogram with no metric aggregation - does not use star-tree
+        dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD).calendarInterval(DateHistogramInterval.DAY);
+        sourceBuilder = new SearchSourceBuilder().size(0).aggregation(dateHistogramAggregationBuilder);
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 9: Date histogram with no valid time interval to resolve aggregation - should not use star-tree
+        dateHistogramAggregationBuilder = dateHistogram("by_sec").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.SECOND)
+            .subAggregation(maxAggNoSub);
+        sourceBuilder = new SearchSourceBuilder().size(0).aggregation(dateHistogramAggregationBuilder);
+        assertStarTreeContext(request, sourceBuilder, null, -1);
+
+        // Case 10: Date histogram nested with multiple non-nested metric aggregations - should use star-tree
+        dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD)
+            .calendarInterval(DateHistogramInterval.DAY)
+            .subAggregation(maxAggNoSub)
+            .subAggregation(sumAggNoSub);
+        expectedQueryMap = null;
+        sourceBuilder = new
SearchSourceBuilder().size(0).aggregation(dateHistogramAggregationBuilder); + assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + + setStarTreeIndexSetting(null); + } + + /** + * Test query parsing for date histogram aggregations on star-tree index when @timestamp field does not exist + */ + public void testInvalidQueryParsingForDateHistogramAggregations() throws IOException { + FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); + setStarTreeIndexSetting("true"); + + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + CreateIndexRequestBuilder builder = client().admin() + .indices() + .prepareCreate("test") + .setSettings(settings) + .setMapping(StarTreeFilterTests.getExpandedMapping(1, false)); + createIndex("test", builder); + + IndicesService indicesService = getInstanceFromNode(IndicesService.class); + IndexService indexService = indicesService.indexServiceSafe(resolveIndex("test")); + IndexShard indexShard = indexService.getShard(0); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + new SearchRequest().allowPartialSearchResults(true), + indexShard.shardId(), + 1, + new AliasFilter(null, Strings.EMPTY_ARRAY), + 1.0f, + -1, + null, + null + ); + + MaxAggregationBuilder maxAggNoSub = max("max").field(FIELD_NAME); + DateHistogramAggregationBuilder dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.DAY) + .subAggregation(maxAggNoSub); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0) + .query(new MatchAllQueryBuilder()) + .aggregation(dateHistogramAggregationBuilder); + CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( + "startree1", + CompositeMappedFieldType.CompositeFieldType.STAR_TREE + ); + assertStarTreeContext(request, sourceBuilder, null, -1); + + setStarTreeIndexSetting(null); + } + + private void setStarTreeIndexSetting(String value) { client().admin() .cluster() .prepareUpdateSettings() diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java new file mode 100644 index 0000000000000..564a86deff1af --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java @@ -0,0 +1,361 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.aggregations.startree; + +import com.carrotsearch.randomizedtesting.RandomizedTest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.lucene101.Lucene101Codec; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.apache.lucene.tests.index.RandomIndexWriter; +import org.opensearch.common.Rounding; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.codec.composite.composite101.Composite101Codec; +import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorTestCase; +import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.opensearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Random; + +import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; +import static org.opensearch.search.aggregations.AggregationBuilders.avg; +import static org.opensearch.search.aggregations.AggregationBuilders.count; +import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.opensearch.search.aggregations.AggregationBuilders.max; +import static org.opensearch.search.aggregations.AggregationBuilders.min; +import static org.opensearch.search.aggregations.AggregationBuilders.sum; +import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; + +public class DateHistogramAggregatorTests extends DateHistogramAggregatorTestCase { + private static final String TIMESTAMP_FIELD = "@timestamp"; + private static final MappedFieldType 
TIMESTAMP_FIELD_TYPE = new DateFieldMapper.DateFieldType(TIMESTAMP_FIELD);
+
+    private static final String FIELD_NAME = "status";
+    private static final MappedFieldType NUMBER_FIELD_TYPE = new NumberFieldMapper.NumberFieldType(
+        FIELD_NAME,
+        NumberFieldMapper.NumberType.LONG
+    );
+
+    @Before
+    public void setup() {
+        FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build());
+    }
+
+    @After
+    public void teardown() throws IOException {
+        FeatureFlags.initializeFeatureFlags(Settings.EMPTY);
+    }
+
+    protected Codec getCodec() {
+        final Logger testLogger = LogManager.getLogger(DateHistogramAggregatorTests.class);
+        MapperService mapperService;
+        try {
+            mapperService = StarTreeDocValuesFormatTests.createMapperService(getExpandedMapping(1, false));
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+        return new Composite101Codec(Lucene101Codec.Mode.BEST_SPEED, mapperService, testLogger);
+    }
+
+    public void testStarTreeDateHistogram() throws IOException {
+        Directory directory = newDirectory();
+        IndexWriterConfig conf = newIndexWriterConfig(null);
+        conf.setCodec(getCodec());
+        conf.setMergePolicy(newLogMergePolicy());
+        RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
+
+        Random random = RandomizedTest.getRandom();
+        int totalDocs = 100;
+        final String STATUS = "status";
+        final String SIZE = "size";
+        int val;
+        long date;
+
+        List<Document> docs = new ArrayList<>();
+        // Index 100 random documents
+        for (int i = 0; i < totalDocs; i++) {
+            Document doc = new Document();
+            if (random.nextBoolean()) {
+                val = random.nextInt(10); // Random int between 0 and 9 for status
+                doc.add(new SortedNumericDocValuesField(STATUS, val));
+            }
+            if (random.nextBoolean()) {
+                val = random.nextInt(100); // Random int between 0 and 99 for size
+                doc.add(new SortedNumericDocValuesField(SIZE, val));
+            }
+            date = random.nextInt(180) * 24 * 60 * 60 * 1000L; // Random date within 180 days
+            doc.add(new SortedNumericDocValuesField(TIMESTAMP_FIELD, date));
+            doc.add(new LongPoint(TIMESTAMP_FIELD, date));
+            iw.addDocument(doc);
+            docs.add(doc);
+        }
+
+        if (randomBoolean()) {
+            iw.forceMerge(1);
+        }
+        iw.close();
+
+        DirectoryReader ir = DirectoryReader.open(directory);
+        initValuesSourceRegistry();
+        LeafReaderContext context = ir.leaves().get(0);
+
+        SegmentReader reader = Lucene.segmentReader(context.reader());
+        IndexSearcher indexSearcher = newSearcher(reader, false, false);
+        CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader();
+
+        List<CompositeIndexFieldInfo> compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields();
+        CompositeIndexFieldInfo starTree = compositeIndexFields.get(0);
+
+        ValuesSourceAggregationBuilder[] aggBuilders = {
+            sum("_name").field(FIELD_NAME),
+            max("_name").field(FIELD_NAME),
+            min("_name").field(FIELD_NAME),
+            count("_name").field(FIELD_NAME),
+            avg("_name").field(FIELD_NAME) };
+
+        List<Dimension> supportedDimensions = new LinkedList<>();
+        supportedDimensions.add(new NumericDimension(STATUS));
+        supportedDimensions.add(new NumericDimension(SIZE));
+        supportedDimensions.add(
+            new DateDimension(
+                TIMESTAMP_FIELD,
+                List.of(
+                    new DateTimeUnitAdapter(Rounding.DateTimeUnit.MONTH_OF_YEAR),
+                    new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH)
+                ),
+                DateFieldMapper.Resolution.MILLISECONDS
+            )
+        );
+
+        for (ValuesSourceAggregationBuilder aggregationBuilder : aggBuilders) {
+            Query query = new MatchAllDocsQuery();
+            QueryBuilder queryBuilder = null;
+
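+            // Day and month are materialized star-tree calendar intervals in getExpandedMapping(), while the
+            // yearly histogram is derived by re-bucketing the day-level dimension; each interval below is compared
+            // against the default (non star-tree) execution path for bucket-level equality.
+            DateHistogramAggregationBuilder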
dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.DAY) + .subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, dateHistogramAggregationBuilder, starTree, supportedDimensions); + + dateHistogramAggregationBuilder = dateHistogram("by_month").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, dateHistogramAggregationBuilder, starTree, supportedDimensions); + + // year not present in star-tree, but should be able to compute using @timestamp_day dimension + dateHistogramAggregationBuilder = dateHistogram("by_year").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.YEAR) + .subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, dateHistogramAggregationBuilder, starTree, supportedDimensions); + + // Numeric-terms query with date histogram + for (int cases = 0; cases < 100; cases++) { + String queryField; + long queryValue; + if (randomBoolean()) { + queryField = STATUS; + queryValue = random.nextInt(10); + } else { + queryField = SIZE; + queryValue = random.nextInt(20) - 15; + } + dateHistogramAggregationBuilder = dateHistogram("by_month").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.MONTH) + .subAggregation(aggregationBuilder); + query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); + queryBuilder = new TermQueryBuilder(queryField, queryValue); + testCase(indexSearcher, query, queryBuilder, dateHistogramAggregationBuilder, starTree, supportedDimensions); + + // year not present in star-tree, but should be able to compute using @timestamp_day dimension + dateHistogramAggregationBuilder = dateHistogram("by_year").field(TIMESTAMP_FIELD) + .calendarInterval(DateHistogramInterval.YEAR) + .subAggregation(aggregationBuilder); + testCase(indexSearcher, query, queryBuilder, dateHistogramAggregationBuilder, starTree, supportedDimensions); + } + } + ir.close(); + directory.close(); + } + + private void testCase( + IndexSearcher indexSearcher, + Query query, + QueryBuilder queryBuilder, + DateHistogramAggregationBuilder dateHistogramAggregationBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions + ) throws IOException { + InternalDateHistogram starTreeAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + dateHistogramAggregationBuilder, + starTree, + supportedDimensions, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + true, + TIMESTAMP_FIELD_TYPE, + NUMBER_FIELD_TYPE + ); + + InternalDateHistogram defaultAggregation = searchAndReduceStarTree( + createIndexSettings(), + indexSearcher, + query, + queryBuilder, + dateHistogramAggregationBuilder, + null, + null, + null, + DEFAULT_MAX_BUCKETS, + false, + null, + false, + TIMESTAMP_FIELD_TYPE, + NUMBER_FIELD_TYPE + ); + + assertEquals(defaultAggregation.getBuckets().size(), starTreeAggregation.getBuckets().size()); + assertEquals(defaultAggregation.getBuckets(), starTreeAggregation.getBuckets()); + } + + public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipStarNodeCreationForStatusDimension) throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree1"); // Use the same name as the provided mapping + b.field("type", "star_tree"); + b.startObject("config"); + b.field("max_leaf_docs", maxLeafDocs); + if 
(skipStarNodeCreationForStatusDimension) { + b.startArray("skip_star_node_creation_for_dimensions"); + b.value("status"); // Skip for "status" dimension + b.endArray(); + } + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "status"); + b.endObject(); + b.startObject(); + b.field("name", "size"); + b.endObject(); + b.startObject(); + b.field("name", TIMESTAMP_FIELD); + b.startArray("calendar_intervals"); + b.value("month"); + b.value("day"); + b.endArray(); + b.endObject(); + b.endArray(); + b.startArray("metrics"); + b.startObject(); + b.field("name", "size"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.startObject(); + b.field("name", "status"); + b.startArray("stats"); + b.value("sum"); + b.value("value_count"); + b.value("min"); + b.value("max"); + b.endArray(); + b.endObject(); + b.endArray(); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.field("format", "strict_date_optional_time||epoch_second"); + b.endObject(); + b.startObject("message"); + b.field("type", "keyword"); + b.field("index", false); + b.field("doc_values", false); + b.endObject(); + b.startObject("clientip"); + b.field("type", "ip"); + b.endObject(); + b.startObject("request"); + b.field("type", "text"); + b.startObject("fields"); + b.startObject("raw"); + b.field("type", "keyword"); + b.field("ignore_above", 256); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.startObject("size"); + b.field("type", "integer"); + b.endObject(); + b.startObject("geoip"); + b.startObject("properties"); + b.startObject("country_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("city_name"); + b.field("type", "keyword"); + b.endObject(); + b.startObject("location"); + b.field("type", "geo_point"); + b.endObject(); + b.endObject(); + b.endObject(); + b.endObject(); + }); + } +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index eb891318dd087..ef8e858e3efe1 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -46,6 +46,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; @@ -229,7 +230,7 @@ private long getDocCountFromStarTree(CompositeIndexReader starTreeDocValuesReade List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(context, starTree); - FixedBitSet filteredValues = StarTreeFilter.getStarTreeResult(starTreeValues, filters); + FixedBitSet filteredValues = StarTreeFilter.getStarTreeResult(starTreeValues, filters, Set.of()); SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator( StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( diff --git 
a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index ca7646c072e70..7ba2f1284d551 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -412,7 +412,8 @@ protected SearchContext createSearchContextWithStarTreeContext( // Mock SearchContextAggregations SearchContextAggregations searchContextAggregations = mock(SearchContextAggregations.class); AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); - when(searchContext.aggregations()).thenReturn(searchContextAggregations); + when(searchContext.aggregations()).thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer)); + when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); if (aggregatorFactory != null) { From 65fce3565da113f3ffa6ec52f13b53b8e71c336b Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 27 Jan 2025 18:06:28 -0500 Subject: [PATCH 18/48] Update Gradle to 8.12.1 (#17145) Signed-off-by: Andriy Redko --- gradle/wrapper/gradle-wrapper.properties | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 8b3d2296213c2..c51246f2815f5 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12.1-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 +distributionSha256Sum=296742a352f0b20ec14b143fb684965ad66086c7810b7b255dee216670716175 From e77fc79adb599267084fbe3518902fb46c4f9069 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 27 Jan 2025 18:45:40 -0500 Subject: [PATCH 19/48] OpenJDK Update (January 2025 Patch releases) (#17139) Signed-off-by: Andriy Redko --- .../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++-- gradle/libs.versions.toml | 2 +- .../test/java/org/opensearch/common/time/DateUtilsTests.java | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java index 439d0de39584d..654af7da65662 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java @@ -77,9 +77,9 @@ import java.util.stream.Stream; public class DistroTestPlugin implements Plugin { - private static final String SYSTEM_JDK_VERSION = "23.0.1+11"; + private static final String SYSTEM_JDK_VERSION = "23.0.2+7"; private static final String SYSTEM_JDK_VENDOR = "adoptium"; - private static final String GRADLE_JDK_VERSION = "23.0.1+11"; + private static final String GRADLE_JDK_VERSION = "23.0.2+7"; private static final String GRADLE_JDK_VENDOR = "adoptium"; // all distributions used by distro tests. 
this is temporary until tests are per distribution diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 2230239983a01..0b6886ce011b2 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -3,7 +3,7 @@ opensearch = "3.0.0" lucene = "10.1.0" bundled_jdk_vendor = "adoptium" -bundled_jdk = "23.0.1+11" +bundled_jdk = "23.0.2+7" # optional dependencies spatial4j = "0.7" diff --git a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java index cb691f2177f6d..759f5e9705b18 100644 --- a/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java +++ b/server/src/test/java/org/opensearch/common/time/DateUtilsTests.java @@ -58,7 +58,9 @@ import static org.hamcrest.Matchers.is; public class DateUtilsTests extends OpenSearchTestCase { - private static final Set IGNORE = new HashSet<>(Arrays.asList("Antarctica/Vostok")); + private static final Set IGNORE = new HashSet<>( + Arrays.asList("America/Bahia_Banderas", "America/Hermosillo", "America/Mazatlan", "Mexico/BajaSur", "WET") + ); public void testTimezoneIds() { assertNull(DateUtils.dateTimeZoneToZoneId(null)); From 5e1273793480b6c50a74e84ceb4a7d682b9b5542 Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Tue, 28 Jan 2025 18:20:27 +0530 Subject: [PATCH 20/48] Adding support for append only indices (#17039) Signed-off-by: RS146BIJAY --- CHANGELOG.md | 1 + ...ppendOnlyIndexOperationRetryException.java | 47 ++++ .../action/bulk/AppendOnlyIndicesIT.java | 219 ++++++++++++++++++ .../bulk/BulkPrimaryExecutionContext.java | 43 ++-- .../action/bulk/TransportBulkAction.java | 47 ++++ .../cluster/metadata/IndexMetadata.java | 20 ++ .../common/settings/IndexScopedSettings.java | 1 + .../index/engine/InternalEngine.java | 48 ++-- .../BulkPrimaryExecutionContextTests.java | 30 +++ 9 files changed, 428 insertions(+), 28 deletions(-) create mode 100644 libs/core/src/main/java/org/opensearch/core/index/AppendOnlyIndexOperationRetryException.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/action/bulk/AppendOnlyIndicesIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 29e2e09a16ac1..239ef81f062f7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,6 +24,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/)) - Update script supports java.lang.String.sha1() and java.lang.String.sha256() methods ([#16923](https://github.com/opensearch-project/OpenSearch/pull/16923)) - Added a precaution to handle extreme date values during sorting to prevent `arithmetic_exception: long overflow` ([#16812](https://github.com/opensearch-project/OpenSearch/pull/16812)). +- Add support for append only indices([#17039](https://github.com/opensearch-project/OpenSearch/pull/17039)) - Add `verbose_pipeline` parameter to output each processor's execution details ([#16843](https://github.com/opensearch-project/OpenSearch/pull/16843)). 
 - Add search replica stats to segment replication stats API ([#16678](https://github.com/opensearch-project/OpenSearch/pull/16678))
 - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/))
diff --git a/libs/core/src/main/java/org/opensearch/core/index/AppendOnlyIndexOperationRetryException.java b/libs/core/src/main/java/org/opensearch/core/index/AppendOnlyIndexOperationRetryException.java
new file mode 100644
index 0000000000000..231e1da7ff487
--- /dev/null
+++ b/libs/core/src/main/java/org/opensearch/core/index/AppendOnlyIndexOperationRetryException.java
@@ -0,0 +1,47 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.core.index;
+
+import org.opensearch.OpenSearchException;
+
+/**
+ * This exception indicates that a retry was made during indexing for an append-only index. If the response of an
+ * indexing request contains this exception, no translog entry needs to be added for that request.
+ *
+ * @opensearch.internal
+ */
+public class AppendOnlyIndexOperationRetryException extends OpenSearchException {
+    public AppendOnlyIndexOperationRetryException(String message) {
+        super(message);
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/AppendOnlyIndicesIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/AppendOnlyIndicesIT.java
new file mode 100644
index 0000000000000..55c18b94f5486
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/AppendOnlyIndicesIT.java
@@ -0,0 +1,219 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.action.bulk; + +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.ingest.IngestTestPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; +import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.containsString; + +public class AppendOnlyIndicesIT extends OpenSearchIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(IngestTestPlugin.class, MockTransportService.TestPlugin.class); + } + + public void testIndexDocumentWithACustomDocIdForAppendOnlyIndices() throws Exception { + Client client = internalCluster().coordOnlyNodeClient(); + assertAcked( + client().admin() + .indices() + .prepareCreate("index") + .setSettings( + Settings.builder() + .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + ) + ); + ensureGreen("index"); + + BulkRequestBuilder bulkBuilder = client.prepareBulk(); + + XContentBuilder doc = null; + doc = jsonBuilder().startObject().field("foo", "bar").endObject(); + bulkBuilder.add(client.prepareIndex("index").setId(Integer.toString(0)).setSource(doc)); + + BulkResponse response = bulkBuilder.get(); + assertThat( + response.getItems()[0].getFailureMessage(), + containsString( + "Operation [INDEX] is not allowed with a custom document id 0 as setting `" + + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + + "` is enabled for this index: index;" + ) + ); + } + + public void testUpdateDeleteDocumentForAppendOnlyIndices() throws Exception { + Client client = internalCluster().coordOnlyNodeClient(); + assertAcked( + client().admin() + .indices() + .prepareCreate("index") + .setSettings( + Settings.builder() + .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + ) + ); + ensureGreen("index"); + + BulkRequestBuilder bulkBuilder = client.prepareBulk(); + + XContentBuilder doc = null; + doc = jsonBuilder().startObject().field("foo", "bar").endObject(); + bulkBuilder.add(client.prepareIndex("index").setSource(doc)); + + bulkBuilder.get(); + BulkResponse response = client().prepareBulk().add(client().prepareUpdate("index", 
"0").setDoc("foo", "updated")).get(); + assertThat( + response.getItems()[0].getFailureMessage(), + containsString( + "Operation [UPDATE] is not allowed as setting `" + + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + + "` is enabled for this index" + ) + ); + + response = client().prepareBulk().add(client().prepareDelete("index", "0")).get(); + assertThat( + response.getItems()[0].getFailureMessage(), + containsString( + "Operation [DELETE] is not allowed as setting `" + + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + + "` is enabled for this index" + ) + ); + } + + public void testRetryForAppendOnlyIndices() throws Exception { + final AtomicBoolean exceptionThrown = new AtomicBoolean(false); + int numDocs = scaledRandomIntBetween(100, 1000); + Client client = internalCluster().coordOnlyNodeClient(); + NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); + NodeStats unluckyNode = randomFrom( + nodeStats.getNodes().stream().filter((s) -> s.getNode().isDataNode()).collect(Collectors.toList()) + ); + assertAcked( + client().admin() + .indices() + .prepareCreate("index") + .setSettings( + Settings.builder() + .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + ) + ); + ensureGreen("index"); + logger.info("unlucky node: {}", unluckyNode.getNode()); + // create a transport service that throws a ConnectTransportException for one bulk request and therefore triggers a retry. + for (NodeStats dataNode : nodeStats.getNodes()) { + if (exceptionThrown.get()) { + break; + } + + MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + dataNode.getNode().getName() + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + (connection, requestId, action, request, options) -> { + connection.sendRequest(requestId, action, request, options); + if (action.equals(TransportShardBulkAction.ACTION_NAME) && exceptionThrown.compareAndSet(false, true)) { + logger.debug("Throw ConnectTransportException"); + throw new ConnectTransportException(connection.getNode(), action); + } + } + ); + } + + BulkRequestBuilder bulkBuilder = client.prepareBulk(); + + for (int i = 0; i < numDocs; i++) { + XContentBuilder doc = null; + doc = jsonBuilder().startObject().field("foo", "bar").endObject(); + bulkBuilder.add(client.prepareIndex("index").setSource(doc)); + } + + BulkResponse response = bulkBuilder.get(); + for (BulkItemResponse singleIndexResponse : response.getItems()) { + // Retry will not create a new version. 
+ assertThat(singleIndexResponse.getVersion(), equalTo(1L)); + } + } + + public void testNodeReboot() throws Exception { + int numDocs = scaledRandomIntBetween(100, 1000); + Client client = internalCluster().coordOnlyNodeClient(); + assertAcked( + client().admin() + .indices() + .prepareCreate("index") + .setSettings( + Settings.builder() + .put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0) + .put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + ) + ); + + ensureGreen("index"); + + BulkRequestBuilder bulkBuilder = client.prepareBulk(); + + for (int i = 0; i < numDocs; i++) { + XContentBuilder doc = null; + doc = jsonBuilder().startObject().field("foo", "bar").endObject(); + bulkBuilder.add(client.prepareIndex("index").setSource(doc)); + } + + BulkResponse response = bulkBuilder.get(); + assertFalse(response.hasFailures()); + internalCluster().restartRandomDataNode(); + ensureGreen("index"); + refresh(); + SearchResponse searchResponse = client().prepareSearch() + .setQuery(QueryBuilders.matchAllQuery()) + .setIndices("index") + .setSize(numDocs) + .get(); + + assertBusy(() -> { assertHitCount(searchResponse, numDocs); }, 20L, TimeUnit.SECONDS); + + } +} diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java index 4e770f5851bc6..08373481d5711 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkPrimaryExecutionContext.java @@ -38,6 +38,7 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.action.support.replication.TransportWriteAction; +import org.opensearch.core.index.AppendOnlyIndexOperationRetryException; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.translog.Translog; @@ -297,20 +298,36 @@ public void markOperationAsExecuted(Engine.Result result) { locationToSync = TransportWriteAction.locationToSync(locationToSync, result.getTranslogLocation()); break; case FAILURE: - executionResult = new BulkItemResponse( - current.id(), - docWriteRequest.opType(), - // Make sure to use request.index() here, if you - // use docWriteRequest.index() it will use the - // concrete index instead of an alias if used! - new BulkItemResponse.Failure( - request.index(), - docWriteRequest.id(), - result.getFailure(), + if (result.getFailure() instanceof AppendOnlyIndexOperationRetryException) { + Engine.IndexResult indexResult = (Engine.IndexResult) result; + DocWriteResponse indexResponse = new IndexResponse( + primary.shardId(), + requestToExecute.id(), result.getSeqNo(), - result.getTerm() - ) - ); + result.getTerm(), + indexResult.getVersion(), + indexResult.isCreated() + ); + + executionResult = new BulkItemResponse(current.id(), current.request().opType(), indexResponse); + // set a blank ShardInfo so we can safely send it to the replicas. We won't use it in the real response though. + executionResult.getResponse().setShardInfo(new ReplicationResponse.ShardInfo()); + } else { + executionResult = new BulkItemResponse( + current.id(), + docWriteRequest.opType(), + // Make sure to use request.index() here, if you + // use docWriteRequest.index() it will use the + // concrete index instead of an alias if used! 
+ new BulkItemResponse.Failure( + request.index(), + docWriteRequest.id(), + result.getFailure(), + result.getSeqNo(), + result.getTerm() + ) + ); + } break; default: throw new AssertionError("unknown result type for " + getCurrentItem() + ": " + result.getResultType()); diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index db509afb68da9..2cb468098d997 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -66,6 +66,7 @@ import org.opensearch.cluster.metadata.MappingMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.ValidationException; import org.opensearch.common.inject.Inject; import org.opensearch.common.lease.Releasable; import org.opensearch.common.unit.TimeValue; @@ -540,12 +541,17 @@ protected void doRun() { if (docWriteRequest == null) { continue; } + if (addFailureIfRequiresAliasAndAliasIsMissing(docWriteRequest, i, metadata)) { continue; } if (addFailureIfIndexIsUnavailable(docWriteRequest, i, concreteIndices, metadata)) { continue; } + if (addFailureIfAppendOnlyIndexAndOpsDeleteOrUpdate(docWriteRequest, i, concreteIndices, metadata)) { + continue; + } + Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest); try { // The ConcreteIndices#resolveIfAbsent(...) method validates via IndexNameExpressionResolver whether @@ -749,6 +755,47 @@ public void onTimeout(TimeValue timeout) { }); } + private boolean addFailureIfAppendOnlyIndexAndOpsDeleteOrUpdate( + DocWriteRequest request, + int idx, + final ConcreteIndices concreteIndices, + Metadata metadata + ) { + Index concreteIndex = concreteIndices.resolveIfAbsent(request); + final IndexMetadata indexMetadata = metadata.index(concreteIndex); + if (indexMetadata.isAppendOnlyIndex()) { + if ((request.opType() == DocWriteRequest.OpType.UPDATE || request.opType() == DocWriteRequest.OpType.DELETE)) { + ValidationException exception = new ValidationException(); + exception.addValidationError( + "Operation [" + + request.opType() + + "] is not allowed as setting `" + + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + + "` is enabled for this index: " + + request.index() + ); + addFailure(request, idx, exception); + return true; + } else if (request.id() != null && request.opType() == DocWriteRequest.OpType.INDEX) { + ValidationException exception = new ValidationException(); + exception.addValidationError( + "Operation [" + + request.opType() + + "] is not allowed with a custom document id " + + request.id() + + " as setting `" + + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + + "` is enabled for this index: " + + request.index() + ); + addFailure(request, idx, exception); + return true; + } + } + + return false; + } + private boolean addFailureIfRequiresAliasAndAliasIsMissing(DocWriteRequest request, int idx, final Metadata metadata) { if (request.isRequireAlias() && (metadata.hasAlias(request.index()) == false)) { Exception exception = new IndexNotFoundException( diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cee331788e4b7..e09630d813ebf 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ 
b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -351,6 +351,7 @@ public Iterator<Setting<?>> settings() {
 );
 public static final String SETTING_REMOTE_STORE_ENABLED = "index.remote_store.enabled";
+ public static final String SETTING_INDEX_APPEND_ONLY_ENABLED = "index.append_only.enabled";
 public static final String SETTING_REMOTE_SEGMENT_STORE_REPOSITORY = "index.remote_store.segment.repository";
@@ -393,6 +394,16 @@ public Iterator<Setting<?>> settings() {
 Property.Dynamic
 );
+ /**
+ * Used to specify if the index should allow only append operations (document updates and deletes are rejected).
+ */
+ public static final Setting<Boolean> INDEX_APPEND_ONLY_ENABLED_SETTING = Setting.boolSetting(
+ SETTING_INDEX_APPEND_ONLY_ENABLED,
+ false,
+ Property.IndexScope,
+ Property.Final
+ );
+
 /**
 * Used to specify remote store repository to use for this index.
 */
@@ -722,6 +733,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException {
 private final boolean isRemoteSnapshot;
 private final int indexTotalShardsPerNodeLimit;
+ private final boolean isAppendOnlyIndex;
 private final Context context;
@@ -753,6 +765,7 @@ private IndexMetadata(
 final Map<String, RolloverInfo> rolloverInfos,
 final boolean isSystem,
 final int indexTotalShardsPerNodeLimit,
+ boolean isAppendOnlyIndex,
 final Context context
 ) {
@@ -790,6 +803,7 @@ private IndexMetadata(
 this.isSystem = isSystem;
 this.isRemoteSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match(this.settings);
 this.indexTotalShardsPerNodeLimit = indexTotalShardsPerNodeLimit;
+ this.isAppendOnlyIndex = isAppendOnlyIndex;
 this.context = context;
 assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards;
 }
@@ -952,6 +966,10 @@ public int getIndexTotalShardsPerNodeLimit() {
 return this.indexTotalShardsPerNodeLimit;
 }
+ public boolean isAppendOnlyIndex() {
+ return this.isAppendOnlyIndex;
+ }
+
 @Nullable
 public DiscoveryNodeFilters requireFilters() {
 return requireFilters;
@@ -1745,6 +1763,7 @@ public IndexMetadata build() {
 }
 final int indexTotalShardsPerNodeLimit = ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(settings);
+ final boolean isAppendOnlyIndex = INDEX_APPEND_ONLY_ENABLED_SETTING.get(settings);
 final String uuid = settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
@@ -1776,6 +1795,7 @@ public IndexMetadata build() {
 rolloverInfos,
 isSystem,
 indexTotalShardsPerNodeLimit,
+ isAppendOnlyIndex,
 context
 );
 }
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index 8d56a942c5d6e..387ed0ed92680 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -109,6 +109,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
 IndexMetadata.INDEX_FORMAT_SETTING,
 IndexMetadata.INDEX_HIDDEN_SETTING,
 IndexMetadata.INDEX_REPLICATION_TYPE_SETTING,
+ IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING,
 SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
 SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
 SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
index 59165b936aec8..43279ac3b2281 100644
--- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
+++
b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -88,6 +88,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.Assertions; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.core.index.AppendOnlyIndexOperationRetryException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; import org.opensearch.index.VersionType; @@ -932,19 +933,21 @@ public IndexResult index(Index index) throws IOException { final Translog.Location location; if (indexResult.getResultType() == Result.Type.SUCCESS) { location = translogManager.add(new Translog.Index(index, indexResult)); - } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO) { - // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no - final NoOp noOp = new NoOp( - indexResult.getSeqNo(), - index.primaryTerm(), - index.origin(), - index.startTime(), - indexResult.getFailure().toString() - ); - location = innerNoOp(noOp).getTranslogLocation(); - } else { - location = null; - } + } else if (indexResult.getSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO + && indexResult.getFailure() != null + && !(indexResult.getFailure() instanceof AppendOnlyIndexOperationRetryException)) { + // if we have document failure, record it as a no-op in the translog and Lucene with the generated seq_no + final NoOp noOp = new NoOp( + indexResult.getSeqNo(), + index.primaryTerm(), + index.origin(), + index.startTime(), + indexResult.getFailure().toString() + ); + location = innerNoOp(noOp).getTranslogLocation(); + } else { + location = null; + } indexResult.setTranslogLocation(location); } if (plan.indexIntoLucene && indexResult.getResultType() == Result.Type.SUCCESS) { @@ -955,7 +958,9 @@ public IndexResult index(Index index) throws IOException { ); } localCheckpointTracker.markSeqNoAsProcessed(indexResult.getSeqNo()); - if (indexResult.getTranslogLocation() == null) { + if (indexResult.getTranslogLocation() == null + && !(indexResult.getFailure() != null + && (indexResult.getFailure() instanceof AppendOnlyIndexOperationRetryException))) { // the op is coming from the translog (and is hence persisted already) or it does not have a sequence number assert index.origin().isFromTranslog() || indexResult.getSeqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO; localCheckpointTracker.markSeqNoAsPersisted(indexResult.getSeqNo()); @@ -1049,7 +1054,7 @@ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { } else { versionMap.enforceSafeAccess(); // resolves incoming version - final VersionValue versionValue = resolveDocVersion(index, index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO); + final VersionValue versionValue = resolveDocVersion(index, true); final long currentVersion; final boolean currentNotFoundOrDeleted; if (versionValue == null) { @@ -1092,6 +1097,15 @@ private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException { final Exception reserveError = tryAcquireInFlightDocs(index, reservingDocs); if (reserveError != null) { plan = IndexingStrategy.failAsTooManyDocs(reserveError); + } else if (currentVersion >= 1 && engineConfig.getIndexSettings().getIndexMetadata().isAppendOnlyIndex()) { + // Retry happens for indexing requests for append only indices, since we are rejecting update requests + // at Transport layer itself. So for any retry, we are reconstructing response from already indexed + // document version for append only index. 
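+ // The IndexResult below carries the current version, term and seqNo so that
+ // BulkPrimaryExecutionContext can convert this retry into a successful IndexResponse.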
+ AppendOnlyIndexOperationRetryException retryException = new AppendOnlyIndexOperationRetryException( + "Indexing operation retried for append only indices" + ); + final IndexResult result = new IndexResult(retryException, currentVersion, versionValue.term, versionValue.seqNo); + plan = IndexingStrategy.failAsIndexAppendOnly(result, currentVersion, 0); } else { plan = IndexingStrategy.processNormally( currentNotFoundOrDeleted, @@ -1283,6 +1297,10 @@ static IndexingStrategy failAsTooManyDocs(Exception e) { final IndexResult result = new IndexResult(e, Versions.NOT_FOUND); return new IndexingStrategy(false, false, false, false, Versions.NOT_FOUND, 0, result); } + + static IndexingStrategy failAsIndexAppendOnly(IndexResult result, long versionForIndexing, int reservedDocs) { + return new IndexingStrategy(false, false, false, true, versionForIndexing, reservedDocs, result); + } } /** diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java index de096aee45bf9..9745203e91586 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkPrimaryExecutionContextTests.java @@ -40,6 +40,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.WriteRequest; import org.opensearch.action.update.UpdateRequest; +import org.opensearch.core.index.AppendOnlyIndexOperationRetryException; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; @@ -79,6 +80,35 @@ public void testAbortedSkipped() { assertThat(visitedRequests, equalTo(nonAbortedRequests)); } + public void testAppendOnlyIndexOperationRetryException() { + BulkShardRequest shardRequest = generateRandomRequest(); + + final IndexShard primary = mock(IndexShard.class); + when(primary.shardId()).thenReturn(shardRequest.shardId()); + ArrayList> nonAbortedRequests = new ArrayList<>(); + for (BulkItemRequest request : shardRequest.items()) { + if (randomBoolean()) { + request.abort("index", new AppendOnlyIndexOperationRetryException("Indexing operation retried for append only indices")); + } else { + nonAbortedRequests.add(request.request()); + } + } + + ArrayList> visitedRequests = new ArrayList<>(); + for (BulkPrimaryExecutionContext context = new BulkPrimaryExecutionContext(shardRequest, primary); context + .hasMoreOperationsToExecute();) { + visitedRequests.add(context.getCurrent()); + context.setRequestToExecute(context.getCurrent()); + // using failures prevents caring about types + context.markOperationAsExecuted( + new Engine.IndexResult(new AppendOnlyIndexOperationRetryException("Indexing operation retried for append only indices"), 1) + ); + context.markAsCompleted(context.getExecutionResult()); + } + + assertThat(visitedRequests, equalTo(nonAbortedRequests)); + } + private BulkShardRequest generateRandomRequest() { BulkItemRequest[] items = new BulkItemRequest[randomInt(20)]; for (int i = 0; i < items.length; i++) { From 8ec93ae4e178c391b916a721ea4884ef559fa796 Mon Sep 17 00:00:00 2001 From: Shailesh Singh Date: Tue, 28 Jan 2025 20:06:41 +0530 Subject: [PATCH 21/48] Add Star Tree unsigned-long indexing changes (#17156) Signed-off-by: Shailesh Singh --- .../index/mapper/StarTreeMapperIT.java | 10 + .../Composite912DocValuesReader.java | 7 +- .../compositeindex/datacube/Dimension.java | 20 +- 
.../datacube/DimensionDataType.java | 52 +++ .../datacube/DimensionFactory.java | 4 + .../datacube/DimensionType.java | 6 + .../datacube/ReadDimension.java | 15 + .../datacube/UnsignedLongDimension.java | 43 +++ .../startree/builder/BaseStarTreeBuilder.java | 6 + .../builder/OffHeapStarTreeBuilder.java | 6 +- .../builder/OnHeapStarTreeBuilder.java | 30 +- .../startree/fileformats/StarTreeWriter.java | 5 +- .../fileformats/meta/DimensionConfig.java | 38 ++ .../fileformats/meta/StarTreeMetadata.java | 75 ++-- .../meta/StarTreeMetadataWriter.java | 30 +- .../startree/index/StarTreeValues.java | 7 +- .../datacube/startree/node/StarTreeNode.java | 8 +- .../utils/StarTreeDocumentsSorter.java | 19 +- .../index/mapper/NumberFieldMapper.java | 12 +- .../search/startree/StarTreeFilter.java | 7 +- .../StarTreeDocValuesFormatTests.java | 40 +- .../datacube/startree/StarTreeTestUtils.java | 40 +- .../startree/builder/BuilderTestsUtils.java | 7 +- .../builder/StarTreeBuildMetricTests.java | 174 ++++++++- .../StarTreeBuilderFlushFlowTests.java | 154 +++++++- .../StarTreeBuilderMergeFlowTests.java | 352 ++++++++++++++++-- .../builder/StarTreeBuilderTestCase.java | 77 +++- .../meta/StarTreeMetadataTests.java | 14 +- .../utils/StarTreeDocumentsSorterTests.java | 185 ++++++--- .../index/mapper/StarTreeMapperTests.java | 34 +- 30 files changed, 1226 insertions(+), 251 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/UnsignedLongDimension.java create mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/DimensionConfig.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 87577cf2e24cc..e90665b14adbf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -76,6 +76,9 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .startObject() .field("name", "keyword_dv") .endObject() + .startObject() + .field("name", "unsignedLongDimension") // UnsignedLongDimension + .endObject() .endArray() .startArray("metrics") .startObject() @@ -117,6 +120,10 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool .field("type", "wildcard") .field("doc_values", false) .endObject() + .startObject("unsignedLongDimension") + .field("type", "unsigned_long") + .field("doc_values", true) + .endObject() .endObject() .endObject(); } catch (IOException e) { @@ -605,8 +612,11 @@ public void testValidCompositeIndex() { for (int i = 0; i < dateDim.getSortedCalendarIntervals().size(); i++) { assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); } + assertEquals(4, starTreeFieldType.getDimensions().size()); assertEquals("numeric_dv", starTreeFieldType.getDimensions().get(1).getField()); assertEquals("keyword_dv", starTreeFieldType.getDimensions().get(2).getField()); + assertEquals("unsignedLongDimension", starTreeFieldType.getDimensions().get(3).getField()); + assertEquals("numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); 
assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); diff --git a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java index 7178ffbadf9f1..ebb6afae57f02 100644 --- a/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java +++ b/server/src/main/java/org/opensearch/index/codec/composite/composite912/Composite912DocValuesReader.java @@ -36,6 +36,7 @@ import org.opensearch.index.compositeindex.CompositeIndexMetadata; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.index.CompositeIndexValues; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; @@ -157,15 +158,15 @@ public Composite912DocValuesReader(DocValuesProducer producer, SegmentReadState compositeIndexInputMap.put(compositeFieldName, starTreeIndexInput); compositeIndexMetadataMap.put(compositeFieldName, starTreeMetadata); - Map dimensionFieldToDocValuesMap = starTreeMetadata.getDimensionFields(); + Map dimensionFieldToDocValuesMap = starTreeMetadata.getDimensionFields(); // generating star tree unique fields (fully qualified name for dimension and metrics) - for (Map.Entry dimensionEntry : dimensionFieldToDocValuesMap.entrySet()) { + for (Map.Entry dimensionEntry : dimensionFieldToDocValuesMap.entrySet()) { String dimName = fullyQualifiedFieldNameForStarTreeDimensionsDocValues( compositeFieldName, dimensionEntry.getKey() ); fields.add(dimName); - dimensionFieldTypeMap.put(dimName, dimensionEntry.getValue()); + dimensionFieldTypeMap.put(dimName, dimensionEntry.getValue().getDocValuesType()); } // adding metric fields for (Metric metric : starTreeMetadata.getMetrics()) { diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java index 3d71b38881693..366a979946e5f 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/Dimension.java @@ -12,6 +12,7 @@ import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.core.xcontent.ToXContent; +import java.util.Comparator; import java.util.List; import java.util.function.Consumer; @@ -34,8 +35,8 @@ public interface Dimension extends ToXContent { /** * Sets the dimension values with the consumer * - * @param value The value to be set - * @param dimSetter Consumer which sets the dimensions + * @param value The value to be set + * @param dimSetter Consumer which sets the dimensions */ void setDimensionValues(final Long value, final Consumer dimSetter); @@ -45,4 +46,19 @@ public interface Dimension extends ToXContent { List getSubDimensionNames(); DocValuesType getDocValuesType(); + + /** + * Returns the dimensionDataType used for comparing and parsing dimension values.
+ * This determines how numeric values are compared and parsed:
+ * - DimensionDataType.UNSIGNED_LONG for unsigned long values
+ * - DimensionDataType.LONG for all other numeric types (DEFAULT) + */ + default DimensionDataType getDimensionDataType() { + return DimensionDataType.LONG; + } + + default Comparator comparator() { + return (a, b) -> getDimensionDataType().compare(a, b); + } + } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java new file mode 100644 index 0000000000000..67138b69c69fa --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionDataType.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube; + +import org.opensearch.common.annotation.ExperimentalApi; + +/** + * Represents the data type of the dimension value. + * + * @opensearch.experimental + */ +@ExperimentalApi +public enum DimensionDataType { + LONG { + @Override + int compare(Long a, Long b) { + if (a == null && b == null) { + return 0; + } + if (b == null) { + return -1; + } + if (a == null) { + return 1; + } + return Long.compare(a, b); + } + }, + UNSIGNED_LONG { + @Override + int compare(Long a, Long b) { + if (a == null && b == null) { + return 0; + } + if (b == null) { + return -1; + } + if (a == null) { + return 1; + } + return Long.compareUnsigned(a, b); + } + }; + + abstract int compare(Long a, Long b); +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java index b1e78d78d3ad2..aed9d1d56a188 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionFactory.java @@ -45,6 +45,8 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NumericDimension.NUMERIC: return new NumericDimension(name); + case UnsignedLongDimension.UNSIGNED_LONG: + return new UnsignedLongDimension(name); case ORDINAL: return new OrdinalDimension(name); case IP: @@ -72,6 +74,8 @@ public static Dimension parseAndCreateDimension( return parseAndCreateDateDimension(name, dimensionMap, c); case NUMERIC: return new NumericDimension(name); + case UNSIGNED_LONG: + return new UnsignedLongDimension(name); case ORDINAL: return new OrdinalDimension(name); case IP: diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java index f7911e72f36fc..8f9db7df596ed 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/DimensionType.java @@ -23,6 +23,12 @@ public enum DimensionType { */ NUMERIC, + /** + * Represents an unsigned long dimension type. + * This is used for dimensions that contain numerical values of type unsigned long. + */ + UNSIGNED_LONG, + /** * Represents a date dimension type. * This is used for dimensions that contain date or timestamp values. 
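An editorial illustration, not part of the patch: the sketch below shows, using only plain JDK calls, why the new DimensionDataType distinction matters. Java has no unsigned 64-bit primitive, so unsigned_long dimension values live in a signed long and must be ordered with Long.compareUnsigned; negative bit patterns then represent values above Long.MAX_VALUE and sort last rather than first. The class name UnsignedOrderingSketch is made up for this example.

import java.util.Comparator;

public class UnsignedOrderingSketch {
    public static void main(String[] args) {
        // Signed view: -20 sorts before 10.
        System.out.println(Long.compare(-20L, 10L) < 0);           // true
        // Unsigned view: the bit pattern of -20L is 2^64 - 20, so it sorts after 10.
        System.out.println(Long.compareUnsigned(-20L, 10L) > 0);   // true
        System.out.println(Long.toUnsignedString(-20L));           // 18446744073709551596

        // Nulls-last comparator mirroring DimensionDataType.UNSIGNED_LONG.compare:
        // a null dimension value sorts after every concrete value.
        Comparator<Long> unsignedNullsLast = (a, b) -> {
            if (a == null && b == null) return 0;
            if (b == null) return -1;   // non-null a comes first
            if (a == null) return 1;    // null a comes last
            return Long.compareUnsigned(a, b);
        };
        System.out.println(unsignedNullsLast.compare(10L, -20L) < 0);  // true: 10 < 2^64 - 20
        System.out.println(unsignedNullsLast.compare(null, 10L) > 0);  // true: null sorts last
    }
}

This is the ordering asserted later in the StarTreeDocValuesFormatTests hunk, where the star-tree documents with unsignedLongDimension = 10 precede those with -20.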
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java index 384553a8f7e06..5a791188982ce 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/ReadDimension.java @@ -26,15 +26,24 @@ public class ReadDimension implements Dimension { public static final String READ = "read"; private final String field; private final DocValuesType docValuesType; + private final DimensionDataType dimensionDataType; public ReadDimension(String field) { this.field = field; this.docValuesType = DocValuesType.SORTED_NUMERIC; + this.dimensionDataType = DimensionDataType.LONG; } public ReadDimension(String field, DocValuesType docValuesType) { this.field = field; this.docValuesType = docValuesType; + this.dimensionDataType = DimensionDataType.LONG; + } + + public ReadDimension(String field, DocValuesType docValuesType, DimensionDataType dimensionDataType) { + this.field = field; + this.docValuesType = docValuesType; + this.dimensionDataType = dimensionDataType; } public String getField() { @@ -82,4 +91,10 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(field); } + + @Override + public DimensionDataType getDimensionDataType() { + return dimensionDataType; + } + } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/UnsignedLongDimension.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/UnsignedLongDimension.java new file mode 100644 index 0000000000000..21f1b291f4821 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/UnsignedLongDimension.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube; + +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; + +import java.io.IOException; + +/** + * Unsigned Long dimension class + * + * @opensearch.experimental + */ +public class UnsignedLongDimension extends NumericDimension { + + public static final String UNSIGNED_LONG = "unsigned_long"; + + public UnsignedLongDimension(String field) { + super(field); + } + + @Override + public DimensionDataType getDimensionDataType() { + return DimensionDataType.UNSIGNED_LONG; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(CompositeDataCubeFieldType.NAME, getField()); + builder.field(CompositeDataCubeFieldType.TYPE, UNSIGNED_LONG); + builder.endObject(); + return builder; + } + +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index cf36f2d7d4126..935c490b5a4dc 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -55,6 +55,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -112,6 +113,8 @@ public abstract class BaseStarTreeBuilder implements StarTreeBuilder { // This should be true for merge flows protected boolean isMerge = false; + protected final List> dimensionComparators = new ArrayList<>(); + /** * Reads all the configuration related to dimensions and metrics, builds a star-tree based on the different construction parameters. 
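* Each dimension contributes one comparator per sub-dimension (collected in dimensionComparators above), so sorting honours its DimensionDataType.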
* @@ -136,6 +139,9 @@ protected BaseStarTreeBuilder( int numDims = 0; for (Dimension dim : starTreeField.getDimensionsOrder()) { numDims += dim.getNumSubDimensions(); + for (int i = 0; i < dim.getNumSubDimensions(); i++) { + dimensionComparators.add(dim.comparator()); + } dimensionsSplitOrder.add(dim); } this.numDimensions = numDims; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java index 63659ef684744..da48559461c49 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OffHeapStarTreeBuilder.java @@ -235,7 +235,7 @@ private Iterator sortAndReduceDocuments(int[] sortedDocIds, in } catch (IOException e) { throw new UncheckedIOException(e); } - }); + }, dimensionComparators); } catch (UncheckedIOException ex) { // Unwrap UncheckedIOException and throw as IOException if (ex.getCause() != null) { @@ -308,6 +308,7 @@ public List getStarTreeDocuments() throws IOException { @Override public Long getDimensionValue(int docId, int dimensionId) throws IOException { return starTreeDocumentFileManager.getDimensionValue(docId, dimensionId); + } /** @@ -334,7 +335,8 @@ public Iterator generateStarTreeDocumentsForStarNode(int start } catch (IOException e) { throw new RuntimeException(e); } - }); + }, dimensionComparators); + // Create an iterator for aggregated documents return new Iterator() { boolean hasNext = true; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java index c91f4c5db98bb..a590b2b69cbc8 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java @@ -42,8 +42,8 @@ public class OnHeapStarTreeBuilder extends BaseStarTreeBuilder { /** * Constructor for OnHeapStarTreeBuilder * - * @param metaOut an index output to write star-tree metadata - * @param dataOut an index output to write star-tree data + * @param metaOut an index output to write star-tree metadata + * @param dataOut an index output to write star-tree data * @param starTreeField star-tree field * @param segmentWriteState segment write state * @param mapperService helps with the numeric type of field @@ -82,9 +82,8 @@ public Long getDimensionValue(int docId, int dimensionId) { * Sorts and aggregates all the documents of the segment based on dimension and metrics configuration * * @param dimensionReaders List of docValues readers to read dimensions from the segment - * @param metricReaders List of docValues readers to read metrics from the segment + * @param metricReaders List of docValues readers to read metrics from the segment * @return Iterator of star-tree documents - * */ @Override public Iterator sortAndAggregateSegmentDocuments( @@ -161,7 +160,7 @@ StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeVal Iterator sortAndAggregateStarTreeDocuments(StarTreeDocument[] starTreeDocuments, boolean isMerge) { // sort all the documents - sortStarTreeDocumentsFromDimensionId(starTreeDocuments, 0); + 
sortStarTreeDocumentsFromDimensionId(starTreeDocuments, -1); // merge the documents return mergeStarTreeDocuments(starTreeDocuments, isMerge); @@ -222,7 +221,7 @@ public Iterator generateStarTreeDocumentsForStarNode(int start } // sort star tree documents from given dimension id (as previous dimension ids have already been processed) - sortStarTreeDocumentsFromDimensionId(starTreeDocuments, dimensionId + 1); + sortStarTreeDocumentsFromDimensionId(starTreeDocuments, dimensionId); return new Iterator() { boolean hasNext = true; @@ -267,22 +266,13 @@ public StarTreeDocument next() { * Sorts the star-tree documents from the given dimension id * * @param starTreeDocuments star-tree documents - * @param dimensionId id of the dimension + * @param dimensionId id of the dimension */ private void sortStarTreeDocumentsFromDimensionId(StarTreeDocument[] starTreeDocuments, int dimensionId) { - Arrays.sort(starTreeDocuments, (o1, o2) -> { - for (int i = dimensionId; i < numDimensions; i++) { - if (!Objects.equals(o1.dimensions[i], o2.dimensions[i])) { - if (o1.dimensions[i] == null && o2.dimensions[i] == null) { - return 0; - } - if (o1.dimensions[i] == null) { - return 1; - } - if (o2.dimensions[i] == null) { - return -1; - } - return Long.compare(o1.dimensions[i], o2.dimensions[i]); + Arrays.sort(starTreeDocuments, (doc1, doc2) -> { + for (int i = dimensionId + 1; i < numDimensions; i++) { + if (!Objects.equals(doc1.dimensions[i], doc2.dimensions[i])) { + return dimensionComparators.get(i).compare(doc1.dimensions[i], doc2.dimensions[i]); } } return 0; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/StarTreeWriter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/StarTreeWriter.java index e5890be3ccb5b..e888235f60ee2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/StarTreeWriter.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/StarTreeWriter.java @@ -27,8 +27,11 @@ public class StarTreeWriter { /** Initial version for the star tree writer */ public static final int VERSION_START = 0; + /** Version for the star tree writer with updated metadata which handles unsigned long */ + public static final int VERSION_DIMENSION_DATA_TYPE = 1; + /** Current version for the star tree writer */ - public static final int VERSION_CURRENT = VERSION_START; + public static final int VERSION_CURRENT = VERSION_DIMENSION_DATA_TYPE; public StarTreeWriter() {} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/DimensionConfig.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/DimensionConfig.java new file mode 100644 index 0000000000000..4515cdde34add --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/DimensionConfig.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.fileformats.meta; + +import org.apache.lucene.index.DocValuesType; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; + +/** + * Class to store DocValuesType and DimensionDataType for a dimension. + * + * @opensearch.experimental + */ +@ExperimentalApi +public class DimensionConfig { + + private final DocValuesType docValuesType; + private final DimensionDataType dimensionDataType; + + public DimensionConfig(DocValuesType docValuesType, DimensionDataType dimensionDataType) { + this.docValuesType = docValuesType; + this.dimensionDataType = dimensionDataType; + } + + public DocValuesType getDocValuesType() { + return docValuesType; + } + + public DimensionDataType getDimensionDataType() { + return dimensionDataType; + } +} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java index 57e47b1a5b9d9..6a28fcff5eafa 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadata.java @@ -14,9 +14,11 @@ import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.CompositeIndexMetadata; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.StarTreeWriter; import org.opensearch.index.mapper.CompositeMappedFieldType; import java.io.IOException; @@ -63,10 +65,10 @@ public class StarTreeMetadata extends CompositeIndexMetadata { private final String starTreeFieldType; /** - * Map of dimension fields to their associated DocValuesType.Insertion order needs to be maintained + * Map of dimension fields to their associated DocValuesType. Insertion order needs to be maintained * as it dictates dimensionSplitOrder */ - LinkedHashMap dimensionFieldsToDocValuesMap; + private LinkedHashMap dimensionFieldToDimensionConfigMap; /** * List of metrics, containing field names and associated metric statistics. @@ -114,7 +116,7 @@ public class StarTreeMetadata extends CompositeIndexMetadata { * @param metaIn an index input to read star-tree meta * @param compositeFieldName name of the composite field. Here, name of the star-tree field. * @param compositeFieldType type of the composite field. Here, STAR_TREE field. - * @param version The version of the star tree stored in the segments. + * @param version The version of the star tree stored in the segments. 
* @throws IOException if unable to read star-tree metadata from the file */ public StarTreeMetadata( @@ -130,7 +132,7 @@ public StarTreeMetadata( this.starTreeFieldType = this.getCompositeFieldType().getName(); this.version = version; this.numberOfNodes = readNumberOfNodes(); - this.dimensionFieldsToDocValuesMap = readStarTreeDimensions(); + this.dimensionFieldToDimensionConfigMap = readStarTreeDimensions(); this.metrics = readMetricEntries(); this.segmentAggregatedDocCount = readSegmentAggregatedDocCount(); this.starTreeDocCount = readStarTreeDocCount(); @@ -149,19 +151,19 @@ public StarTreeMetadata( * A star tree metadata constructor to initialize star tree metadata. * Used for testing. * - * @param meta an index input to read star-tree meta - * @param compositeFieldName name of the composite field. Here, name of the star-tree field. - * @param compositeFieldType type of the composite field. Here, STAR_TREE field. - * @param version The version of the star tree stored in the segments. - * @param dimensionFieldsToDocValuesMap map of dimensionFields to docValues - * @param metrics list of metric entries - * @param segmentAggregatedDocCount segment aggregated doc count - * @param starTreeDocCount the total number of star tree documents for the segment - * @param maxLeafDocs max leaf docs - * @param skipStarNodeCreationInDims set of dimensions to skip star node creation - * @param starTreeBuildMode star tree build mode - * @param dataStartFilePointer star file pointer to the associated star tree data in (.cid) file - * @param dataLength length of the corresponding star-tree data in (.cid) file + * @param meta an index input to read star-tree meta + * @param compositeFieldName name of the composite field. Here, name of the star-tree field. + * @param compositeFieldType type of the composite field. Here, STAR_TREE field. + * @param version The version of the star tree stored in the segments. 
+ * @param dimensionFieldToDimensionConfigMap map of dimensionFields to Dimension config + * @param metrics list of metric entries + * @param segmentAggregatedDocCount segment aggregated doc count + * @param starTreeDocCount the total number of star tree documents for the segment + * @param maxLeafDocs max leaf docs + * @param skipStarNodeCreationInDims set of dimensions to skip star node creation + * @param starTreeBuildMode star tree build mode + * @param dataStartFilePointer star file pointer to the associated star tree data in (.cid) file + * @param dataLength length of the corresponding star-tree data in (.cid) file */ public StarTreeMetadata( String compositeFieldName, @@ -169,7 +171,7 @@ public StarTreeMetadata( IndexInput meta, Integer version, Integer numberOfNodes, - LinkedHashMap dimensionFieldsToDocValuesMap, + LinkedHashMap dimensionFieldToDimensionConfigMap, List metrics, Integer segmentAggregatedDocCount, Integer starTreeDocCount, @@ -185,7 +187,7 @@ public StarTreeMetadata( this.starTreeFieldType = compositeFieldType.getName(); this.version = version; this.numberOfNodes = numberOfNodes; - this.dimensionFieldsToDocValuesMap = dimensionFieldsToDocValuesMap; + this.dimensionFieldToDimensionConfigMap = dimensionFieldToDimensionConfigMap; this.metrics = metrics; this.segmentAggregatedDocCount = segmentAggregatedDocCount; this.starTreeDocCount = starTreeDocCount; @@ -204,14 +206,24 @@ private int readDimensionsCount() throws IOException { return meta.readVInt(); } - private LinkedHashMap readStarTreeDimensions() throws IOException { + private LinkedHashMap readStarTreeDimensions() throws IOException { int dimensionCount = readDimensionsCount(); - LinkedHashMap dimensionFieldsToDocValuesMap = new LinkedHashMap<>(); + LinkedHashMap dimensionFieldToDimensionConfigMap = new LinkedHashMap<>(); for (int i = 0; i < dimensionCount; i++) { - dimensionFieldsToDocValuesMap.put(meta.readString(), getDocValuesType(meta, meta.readByte())); + if (getVersion() >= StarTreeWriter.VERSION_DIMENSION_DATA_TYPE) { + dimensionFieldToDimensionConfigMap.put( + meta.readString(), + new DimensionConfig(getDocValuesType(meta, meta.readByte()), getDimensionDataType(meta, meta.readByte())) + ); + } else { + dimensionFieldToDimensionConfigMap.put( + meta.readString(), + new DimensionConfig(getDocValuesType(meta, meta.readByte()), DimensionDataType.LONG) + ); + } } - return dimensionFieldsToDocValuesMap; + return dimensionFieldToDimensionConfigMap; } private int readMetricsCount() throws IOException { @@ -315,8 +327,8 @@ public String getStarTreeFieldType() { * * @return star-tree dimension field numbers */ - public Map getDimensionFields() { - return dimensionFieldsToDocValuesMap; + public Map getDimensionFields() { + return dimensionFieldToDimensionConfigMap; } /** @@ -393,6 +405,7 @@ public long getDataLength() { /** * Returns the version with which the star tree is stored in the segments + * * @return star-tree version */ public int getVersion() { @@ -401,6 +414,7 @@ public int getVersion() { /** * Returns the number of nodes in the star tree + * * @return number of nodes in the star tree */ public int getNumberOfNodes() { @@ -425,4 +439,15 @@ private static DocValuesType getDocValuesType(IndexInput input, byte b) throws I throw new CorruptIndexException("invalid docvalues byte: " + b, input); } } + + private static DimensionDataType getDimensionDataType(IndexInput input, byte b) throws IOException { + switch (b) { + case 0: + return DimensionDataType.LONG; + case 1: + return 
DimensionDataType.UNSIGNED_LONG; + default: + throw new CorruptIndexException("invalid dimensionDataType byte: " + b, input); + } + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java index 569692ce18893..5bf9c6174fd2f 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataWriter.java @@ -12,6 +12,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.store.IndexOutput; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.aggregators.MetricAggregatorInfo; import org.opensearch.index.mapper.CompositeMappedFieldType; @@ -131,9 +133,20 @@ private static void writeMeta( metaOut.writeVInt(starTreeField.getDimensionNames().size()); // dimensions - for (int i = 0; i < starTreeField.getDimensionNames().size(); i++) { - metaOut.writeString(starTreeField.getDimensionNames().get(i)); - metaOut.writeByte(docValuesByte(starTreeField.getDimensionDocValueTypes().get(i))); + List dimensionsOrder = starTreeField.getDimensionsOrder(); + int docDimensionIndex = 0; + for (Dimension currentDimension : dimensionsOrder) { + int numSubDimensions = currentDimension.getNumSubDimensions(); + + // Process each sub-dimension + while (numSubDimensions > 0) { + metaOut.writeString(starTreeField.getDimensionNames().get(docDimensionIndex)); + metaOut.writeByte(docValuesByte(starTreeField.getDimensionDocValueTypes().get(docDimensionIndex))); + metaOut.writeByte(dimensionDataTypeByte(currentDimension.getDimensionDataType())); + + numSubDimensions--; + docDimensionIndex++; + } } // number of metrics @@ -174,6 +187,17 @@ private static void writeMeta( } + private static byte dimensionDataTypeByte(DimensionDataType dimensionDataType) { + switch (dimensionDataType) { + case LONG: + return 0; + case UNSIGNED_LONG: + return 1; + default: + throw new AssertionError("unhandled dimensionDataType: " + dimensionDataType); + } + } + private static byte docValuesByte(DocValuesType type) { switch (type) { case NONE: diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java index 6a13e6e789f3a..6658d53afd21a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java @@ -24,6 +24,7 @@ import org.opensearch.index.compositeindex.datacube.ReadDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import 
org.opensearch.index.compositeindex.datacube.startree.node.StarTreeFactory; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; @@ -131,13 +132,15 @@ public StarTreeValues( // build dimensions List readDimensions = new ArrayList<>(); - for (String dimension : starTreeMetadata.getDimensionFields().keySet()) { + for (Map.Entry dimensionEntry : starTreeMetadata.getDimensionFields().entrySet()) { + String dimension = dimensionEntry.getKey(); readDimensions.add( new ReadDimension( dimension, readState.fieldInfos.fieldInfo( fullyQualifiedFieldNameForStarTreeDimensionsDocValues(starTreeMetadata.getCompositeFieldName(), dimension) - ).getDocValuesType() + ).getDocValuesType(), + dimensionEntry.getValue().getDimensionDataType() ) ); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java index fce3e30e9ebf6..3767f6850002a 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java @@ -89,13 +89,13 @@ public interface StarTreeNode { * *

<p>
 * The node type can be one of the following:
 * <ul>
-    *     <li>Star Node: Represented by the value -2.</li>
-    *     <li>Null Node: Represented by the value -1.</li>
+    *     <li>Star Node: Represented by the value -1.</li>
+    *     <li>Null Node: Represented by the value 1.</li>
 *     <li>Default Node: Represented by the value 0.</li>
 * </ul>
* @see StarTreeNodeType * - * @return The type of the current node, represented by the corresponding integer value (-2, -1, or 0). + * @return The type of the current node, represented by the corresponding integer value (-1, 1, 0). * @throws IOException if an I/O error occurs while reading the node type */ byte getStarTreeNodeType() throws IOException; @@ -103,7 +103,7 @@ public interface StarTreeNode { /** * Returns the child node for the given dimension value in the star-tree. * - * @param dimensionValue the dimension value + * @param dimensionValue the dimension value * @return the child node for the given dimension value or null if child is not present * @throws IOException if an I/O error occurs while retrieving the child node */ diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java index 7b1c63bc611ee..09a653f13cf28 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorter.java @@ -10,6 +10,8 @@ import org.apache.lucene.util.IntroSorter; +import java.util.Comparator; +import java.util.List; import java.util.Objects; import java.util.function.IntFunction; @@ -24,7 +26,8 @@ public static void sort( final int[] sortedDocIds, final int dimensionId, final int numDocs, - final IntFunction dimensionsReader + final IntFunction dimensionsReader, + final List> dimensionComparators ) { new IntroSorter() { private Long[] dimensions; @@ -45,18 +48,8 @@ protected void setPivot(int i) { protected int comparePivot(int j) { Long[] currentDimensions = dimensionsReader.apply(j); for (int i = dimensionId + 1; i < dimensions.length; i++) { - Long dimension = currentDimensions[i]; - if (!Objects.equals(dimensions[i], dimension)) { - if (dimensions[i] == null && dimension == null) { - return 0; - } - if (dimension == null) { - return -1; - } - if (dimensions[i] == null) { - return 1; - } - return Long.compare(dimensions[i], dimension); + if (!Objects.equals(dimensions[i], currentDimensions[i])) { + return dimensionComparators.get(i).compare(dimensions[i], currentDimensions[i]); } } return 0; diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index 702e5db50e841..f3fc3f4b2aa95 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -91,7 +91,7 @@ import org.roaringbitmap.RoaringBitmap; /** - * A {@link FieldMapper} for numeric types: byte, short, int, long, float and double. + * A {@link FieldMapper} for numeric types: byte, short, int, long, float, double and unsigned long. * * @opensearch.internal */ @@ -175,13 +175,9 @@ public NumberFieldMapper build(BuilderContext context) { @Override public Optional getSupportedDataCubeDimensionType() { - - // unsigned long is not supported as dimension for star tree - if (type.numericType.equals(NumericType.UNSIGNED_LONG)) { - return Optional.empty(); - } - - return Optional.of(DimensionType.NUMERIC); + return type.numericType.equals(NumericType.UNSIGNED_LONG) + ? 
Optional.of(DimensionType.UNSIGNED_LONG) + : Optional.of(DimensionType.NUMERIC); } @Override diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java index ea2c43a40f330..1629b9d0c1db4 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java @@ -123,11 +123,8 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma DocIdSetBuilder.BulkAdder adder; Set globalRemainingPredicateColumns = null; StarTreeNode starTree = starTreeValues.getRoot(); - List dimensionNames = starTreeValues.getStarTreeField() - .getDimensionsOrder() - .stream() - .map(Dimension::getField) - .collect(Collectors.toList()); + List dimensionsOrder = starTreeValues.getStarTreeField().getDimensionsOrder(); + List dimensionNames = dimensionsOrder.stream().map(Dimension::getField).collect(Collectors.toList()); boolean foundLeafNode = starTree.isLeaf(); assert foundLeafNode == false; // root node is never leaf Queue queue = new ArrayDeque<>(); diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java index 03798c6e4ce55..b2572af042b9c 100644 --- a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/StarTreeDocValuesFormatTests.java @@ -57,22 +57,26 @@ public void testStarTreeDocValues() throws IOException { conf.setMergePolicy(newLogMergePolicy()); RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); Document doc = new Document(); + doc.add(new SortedNumericDocValuesField("unsignedLongDimension", 10)); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedNumericDocValuesField("dv1", 1)); doc.add(new SortedNumericDocValuesField("field1", -1)); iw.addDocument(doc); doc = new Document(); + doc.add(new SortedNumericDocValuesField("unsignedLongDimension", 10)); doc.add(new SortedNumericDocValuesField("sndv", 1)); doc.add(new SortedNumericDocValuesField("dv1", 1)); doc.add(new SortedNumericDocValuesField("field1", -1)); iw.addDocument(doc); doc = new Document(); iw.forceMerge(1); + doc.add(new SortedNumericDocValuesField("unsignedLongDimension", -20)); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedNumericDocValuesField("dv1", 2)); doc.add(new SortedNumericDocValuesField("field1", -2)); iw.addDocument(doc); doc = new Document(); + doc.add(new SortedNumericDocValuesField("unsignedLongDimension", -20)); doc.add(new SortedNumericDocValuesField("sndv", 2)); doc.add(new SortedNumericDocValuesField("dv1", 2)); doc.add(new SortedNumericDocValuesField("field1", -2)); @@ -86,35 +90,35 @@ public void testStarTreeDocValues() throws IOException { // Segment documents /** - * sndv dv field - * [1, 1, -1] - * [1, 1, -1] - * [2, 2, -2] - * [2, 2, -2] + * unsignedLongDimension sndv dv field + * [10, 1, 1, -1] + * [10, 1, 1, -1] + * [-20, 2, 2, -2] + * [-20, 2, 2, -2] */ - // Star tree docuements + // Star tree documents /** - * sndv dv | [ sum, value_count, min, max[field]] , [ sum, value_count, min, max[sndv]], doc_count - * [1, 1] | [-2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0] - * [2, 2] | [-4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 
2.0, 2.0] - * [null, 1] | [-2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0] - * [null, 2] | [-4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 2.0, 2.0] + * unsignedLongDimension sndv dv | [ sum, value_count, min, max[field]] , [ sum, value_count, min, max[sndv]], doc_count + * [10, 1, 1] | [-2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0] + * [-20, 2, 2] | [-4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 2.0, 2.0] + * [null, 1, 1] | [-2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0] + * [null, 2, 2] | [-4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 2.0, 2.0] */ StarTreeDocument[] expectedStarTreeDocuments = new StarTreeDocument[4]; expectedStarTreeDocuments[0] = new StarTreeDocument( - new Long[] { 1L, 1L }, + new Long[] { 10L, 1L, 1L }, new Double[] { -2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0 } ); expectedStarTreeDocuments[1] = new StarTreeDocument( - new Long[] { 2L, 2L }, + new Long[] { -20L, 2L, 2L }, new Double[] { -4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 2.0, 2.0 } ); expectedStarTreeDocuments[2] = new StarTreeDocument( - new Long[] { null, 1L }, + new Long[] { null, 1L, 1L }, new Double[] { -2.0, 2.0, -1.0, -1.0, 2.0, 2.0, 1.0, 1.0, 2.0 } ); expectedStarTreeDocuments[3] = new StarTreeDocument( - new Long[] { null, 2L }, + new Long[] { null, 2L, 2L }, new Double[] { -4.0, 2.0, -2.0, -2.0, 4.0, 2.0, 2.0, 2.0, 2.0 } ); @@ -264,6 +268,9 @@ public static XContentBuilder getExpandedMapping() throws IOException { b.field("max_leaf_docs", 1); b.startArray("ordered_dimensions"); b.startObject(); + b.field("name", "unsignedLongDimension"); // UnsignedLongDimension + b.endObject(); + b.startObject(); b.field("name", "sndv"); b.endObject(); b.startObject(); @@ -305,6 +312,9 @@ public static XContentBuilder getExpandedMapping() throws IOException { b.startObject("field1"); b.field("type", "integer"); b.endObject(); + b.startObject("unsignedLongDimension"); + b.field("type", "unsigned_long"); + b.endObject(); b.endObject(); }); } diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java index 44e40f1db4cc8..7e8ecf79d7443 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java @@ -161,7 +161,8 @@ public static void validateFileFormats( IndexInput dataIn, IndexInput metaIn, InMemoryTreeNode rootNode, - StarTreeMetadata expectedStarTreeMetadata + StarTreeMetadata expectedStarTreeMetadata, + StarTreeField starTreeField ) throws IOException { long magicMarker = metaIn.readLong(); assertEquals(COMPOSITE_FIELD_MARKER, magicMarker); @@ -201,11 +202,18 @@ public static void validateFileFormats( if (rootNode.getChildren() != null) { sortedChildren = new ArrayList<>(rootNode.getChildren().values()); } - - if (starTreeNode.getChildDimensionId() != -1) { + int dimensionId = starTreeNode.getChildDimensionId(); + List dimensionsOrder = starTreeField.getDimensionsOrder(); + if (dimensionId != -1) { assertFalse(sortedChildren.isEmpty()); int childCount = 0; boolean childStarNodeAsserted = false; + boolean nodeWithMinusOneValueFound = false; + /* + Since NULL nodes have a dimension value of -1, we need to track whether we have encountered any + default nodes with this dimension value. We will perform the assertNull() check only if we have not + yet found a default node with a dimension value of -1. 
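+ Otherwise getChildForDimensionValue(-1) may legitimately return that default node and the assertNull() would fail.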
+ */ while (expectedChildrenIterator.hasNext()) { StarTreeNode child = expectedChildrenIterator.next(); InMemoryTreeNode resultChildNode = null; @@ -220,10 +228,13 @@ public static void validateFileFormats( resultChildNode = sortedChildren.get(childCount); assertNotNull(child); assertNotNull(resultChildNode); - if (child.getStarTreeNodeType() != StarTreeNodeType.NULL.getValue()) { - assertNotNull(starTreeNode.getChildForDimensionValue(child.getDimensionValue())); - } else { + if (child.getStarTreeNodeType() == StarTreeNodeType.NULL.getValue() && !nodeWithMinusOneValueFound) { assertNull(starTreeNode.getChildForDimensionValue(child.getDimensionValue())); + } else { + if (child.getDimensionValue() == -1L) { + nodeWithMinusOneValueFound = true; + } + assertNotNull(starTreeNode.getChildForDimensionValue(child.getDimensionValue())); } assertStarTreeNode(child, resultChildNode); assertNotEquals(child.getStarTreeNodeType(), StarTreeNodeType.STAR.getValue()); @@ -271,14 +282,21 @@ public static void assertStarTreeMetadata(StarTreeMetadata expectedStarTreeMetad assertEquals(expectedStarTreeMetadata.getCompositeFieldName(), resultStarTreeMetadata.getCompositeFieldName()); assertEquals(expectedStarTreeMetadata.getCompositeFieldType(), resultStarTreeMetadata.getCompositeFieldType()); + assertEquals(expectedStarTreeMetadata.getDimensionFields().size(), resultStarTreeMetadata.getDimensionFields().size()); - for (int i = 0; i < expectedStarTreeMetadata.getDimensionFields().size(); i++) { - assertEquals(expectedStarTreeMetadata.getDimensionFields().get(i), resultStarTreeMetadata.getDimensionFields().get(i)); - } - assertEquals(expectedStarTreeMetadata.getMetrics().size(), resultStarTreeMetadata.getMetrics().size()); + expectedStarTreeMetadata.getDimensionFields().forEach((dimensionField, dimensionConfig) -> { + assertEquals( + dimensionConfig.getDocValuesType(), + resultStarTreeMetadata.getDimensionFields().get(dimensionField).getDocValuesType() + ); + assertEquals( + dimensionConfig.getDimensionDataType(), + resultStarTreeMetadata.getDimensionFields().get(dimensionField).getDimensionDataType() + ); + }); + assertEquals(expectedStarTreeMetadata.getMetrics().size(), resultStarTreeMetadata.getMetrics().size()); for (int i = 0; i < expectedStarTreeMetadata.getMetrics().size(); i++) { - Metric expectedMetric = expectedStarTreeMetadata.getMetrics().get(i); Metric resultMetric = resultStarTreeMetadata.getMetrics().get(i); assertEquals(expectedMetric.getField(), resultMetric.getField()); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java index e7c3d50c9572a..06c04ce67ea05 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/BuilderTestsUtils.java @@ -36,6 +36,7 @@ import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeTestUtils; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import 
org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode; @@ -439,7 +440,7 @@ public static void validateStarTreeFileFormats( StarTreeDocument[] expectedStarTreeDocumentsArray = expectedStarTreeDocuments.toArray(new StarTreeDocument[0]); StarTreeTestUtils.assertStarTreeDocuments(starTreeDocuments, expectedStarTreeDocumentsArray); - validateFileFormats(dataIn, metaIn, rootNode, expectedStarTreeMetadata); + validateFileFormats(dataIn, metaIn, rootNode, expectedStarTreeMetadata, starTreeField); dataIn.close(); metaIn.close(); @@ -448,7 +449,7 @@ public static void validateStarTreeFileFormats( public static SegmentReadState getReadState( int numDocs, - Map dimensionFields, + Map dimensionFields, List metrics, StarTreeField compositeField, SegmentWriteState writeState, @@ -471,7 +472,7 @@ public static SegmentReadState getReadState( false, true, IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS, - dimensionFields.get(dimension), + dimensionFields.get(dimension).getDocValuesType(), DocValuesSkipIndexType.RANGE, -1, Collections.emptyMap(), diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java index 72ee197a93e18..5690f1d9ef07e 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuildMetricTests.java @@ -30,12 +30,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; @@ -440,6 +443,130 @@ public void test_build_longMetrics() throws IOException { ); } + public void test_build_unsigned_longMetrics() throws IOException { + + mapperService = mock(MapperService.class); + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(mapperService.documentMapper()).thenReturn(documentMapper); + Settings settings = Settings.builder().put(settings(org.opensearch.Version.CURRENT).build()).build(); + NumberFieldMapper numberFieldMapper1 = new NumberFieldMapper.Builder( + "field2", + NumberFieldMapper.NumberType.UNSIGNED_LONG, + false, + true + ).build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper2 = new NumberFieldMapper.Builder( + "field4", + NumberFieldMapper.NumberType.UNSIGNED_LONG, + false, + true + 
).build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper3 = new NumberFieldMapper.Builder( + "field6", + NumberFieldMapper.NumberType.UNSIGNED_LONG, + false, + true + ).build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper4 = new NumberFieldMapper.Builder( + "field9", + NumberFieldMapper.NumberType.UNSIGNED_LONG, + false, + true + ).build(new Mapper.BuilderContext(settings, new ContentPath())); + NumberFieldMapper numberFieldMapper5 = new NumberFieldMapper.Builder( + "field10", + NumberFieldMapper.NumberType.UNSIGNED_LONG, + false, + true + ).build(new Mapper.BuilderContext(settings, new ContentPath())); + MappingLookup fieldMappers = new MappingLookup( + Set.of(numberFieldMapper1, numberFieldMapper2, numberFieldMapper3, numberFieldMapper4, numberFieldMapper5), + Collections.emptyList(), + Collections.emptyList(), + 0, + null + ); + when(documentMapper.mappers()).thenReturn(fieldMappers); + + int noOfStarTreeDocuments = 5; + StarTreeDocument[] starTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + + starTreeDocuments[0] = new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Long[] { 12L, 10L, randomLong(), 8L, -1L }); + starTreeDocuments[1] = new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Long[] { -2L, -9223372036854775808L, randomLong(), 12L, 10L } + ); + starTreeDocuments[2] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 14L, 12L, randomLong(), 6L, 24L }); + starTreeDocuments[3] = new StarTreeDocument( + new Long[] { 2L, 4L, 3L, 4L }, + new Long[] { 9L, 4L, randomLong(), -9223372036854775806L, 12L } + ); + starTreeDocuments[4] = new StarTreeDocument(new Long[] { 3L, 4L, 2L, 1L }, new Long[] { 11L, 16L, randomLong(), 8L, 13L }); + + StarTreeDocument[] segmentStarTreeDocuments = new StarTreeDocument[noOfStarTreeDocuments]; + for (int i = 0; i < noOfStarTreeDocuments; i++) { + long metric1 = (Long) starTreeDocuments[i].metrics[0]; + long metric2 = (Long) starTreeDocuments[i].metrics[1]; + long metric3 = (Long) starTreeDocuments[i].metrics[2]; + long metric4 = (Long) starTreeDocuments[i].metrics[3]; + long metric5 = (Long) starTreeDocuments[i].metrics[4]; + segmentStarTreeDocuments[i] = new StarTreeDocument( + starTreeDocuments[i].dimensions, + new Long[] { metric1, metric2, metric3, metric4, metric5, null } + ); + } + + SequentialDocValuesIterator[] dimsIterators = getDimensionIterators(segmentStarTreeDocuments); + List metricsIterators = getMetricIterators(segmentStarTreeDocuments); + this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + writeState, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService); + Iterator segmentStarTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimsIterators, + metricsIterators + ); + builder.build(segmentStarTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + + List resultStarTreeDocuments = builder.getStarTreeDocuments(); + assertEquals(7, resultStarTreeDocuments.size()); + + Iterator expectedStarTreeDocumentIterator = getExpectedStarTreeDocumentIteratorForUnsignedLong().iterator(); + assertStarTreeDocuments(resultStarTreeDocuments, expectedStarTreeDocumentIterator); + + metaOut.close(); + 
dataOut.close(); + docValuesConsumer.close(); + + StarTreeMetadata starTreeMetadata = new StarTreeMetadata( + "test", + STAR_TREE, + mock(IndexInput.class), + VERSION_CURRENT, + builder.numStarTreeNodes, + getStarTreeDimensionNames(compositeField.getDimensionsOrder()), + compositeField.getMetrics(), + 2, + getExpectedStarTreeDocumentIterator().size(), + 1, + Set.of("field8"), + getBuildMode(), + 0, + 330 + ); + + validateStarTreeFileFormats( + builder.getRootNode(), + getExpectedStarTreeDocumentIteratorForUnsignedLong().size(), + starTreeMetadata, + getExpectedStarTreeDocumentIteratorForUnsignedLong() + ); + } + public void test_build_multipleStarTrees() throws IOException { int noOfStarTreeDocuments = 5; @@ -594,11 +721,11 @@ public void test_build_multipleStarTrees() throws IOException { metaOut.close(); dataOut.close(); - LinkedHashMap fieldsMap = new LinkedHashMap<>(); - fieldsMap.put("field1", DocValuesType.SORTED_NUMERIC); - fieldsMap.put("field3", DocValuesType.SORTED_NUMERIC); - fieldsMap.put("field5", DocValuesType.SORTED_NUMERIC); - fieldsMap.put("field8", DocValuesType.SORTED_NUMERIC); + LinkedHashMap fieldsMap = new LinkedHashMap<>(); + fieldsMap.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + fieldsMap.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + fieldsMap.put("field5", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + fieldsMap.put("field8", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "test", @@ -623,10 +750,10 @@ public void test_build_multipleStarTrees() throws IOException { 330 ); - LinkedHashMap fieldsMap1 = new LinkedHashMap<>(); - fieldsMap1.put("fieldC", DocValuesType.SORTED_NUMERIC); - fieldsMap1.put("fieldB", DocValuesType.SORTED_NUMERIC); - fieldsMap1.put("fieldL", DocValuesType.SORTED_NUMERIC); + LinkedHashMap fieldsMap1 = new LinkedHashMap<>(); + fieldsMap1.put("fieldC", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + fieldsMap1.put("fieldB", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + fieldsMap1.put("fieldL", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata2 = new StarTreeMetadata( "test", @@ -645,7 +772,7 @@ public void test_build_multipleStarTrees() throws IOException { 1287 ); - LinkedHashMap totalDimensionFields = new LinkedHashMap<>(starTreeMetadata.getDimensionFields()); + LinkedHashMap totalDimensionFields = new LinkedHashMap<>(starTreeMetadata.getDimensionFields()); totalDimensionFields.putAll(starTreeMetadata2.getDimensionFields()); List metrics = new ArrayList<>(); @@ -656,9 +783,32 @@ public void test_build_multipleStarTrees() throws IOException { IndexInput dataIn = readState.directory.openInput(dataFileName, IOContext.DEFAULT); IndexInput metaIn = readState.directory.openInput(metaFileName, IOContext.DEFAULT); + List dimensionsOrder1 = List.of( + new NumericDimension("field1"), + new NumericDimension("field3"), + new NumericDimension("field5"), + new NumericDimension("field8") + ); + List dimensionsOrder2 = List.of( + new NumericDimension("fieldC"), + new NumericDimension("fieldB"), + new NumericDimension("fieldL") + ); + StarTreeField compositeField1 = new StarTreeField( + "test", + dimensionsOrder1, + metrics, + new StarTreeFieldConfiguration(1, Set.of(), getBuildMode()) + ); + StarTreeField compositeField2 = new 
StarTreeField( + "test", + dimensionsOrder2, + metrics, + new StarTreeFieldConfiguration(1, Set.of(), getBuildMode()) + ); - validateFileFormats(dataIn, metaIn, rootNode1, starTreeMetadata); - validateFileFormats(dataIn, metaIn, rootNode2, starTreeMetadata2); + validateFileFormats(dataIn, metaIn, rootNode1, starTreeMetadata, compositeField1); + validateFileFormats(dataIn, metaIn, rootNode2, starTreeMetadata2, compositeField2); dataIn.close(); metaIn.close(); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java index 7ecf175b5eb09..6bc75d01bc9d3 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderFlushFlowTests.java @@ -20,14 +20,17 @@ import org.opensearch.index.codec.composite.LuceneDocValuesConsumerFactory; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.IpDimension; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.compositeindex.datacube.UnsignedLongDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; @@ -132,9 +135,9 @@ public void testFlushFlow() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap docValues = new LinkedHashMap<>(); - docValues.put("field1", DocValuesType.SORTED_NUMERIC); - docValues.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + docValues.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "sf", STAR_TREE, @@ -233,9 +236,9 @@ public void testFlushFlowDimsReverse() throws IOException { dataOut.close(); docValuesConsumer.close(); - LinkedHashMap docValues = new LinkedHashMap<>(); - docValues.put("field1", DocValuesType.SORTED_NUMERIC); - docValues.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + docValues.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); 
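+ // Editorial sketch (not part of the original change): DimensionConfig pairs the
+ // Lucene doc-values type with the dimension's data type, letting metadata readers
+ // tell a signed long dimension apart from an unsigned one that shares the same
+ // SORTED_NUMERIC encoding:
+ DimensionConfig unsignedExample = new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG);
+ assert unsignedExample.getDimensionDataType() == DimensionDataType.UNSIGNED_LONG;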
StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "sf", STAR_TREE, @@ -261,6 +264,121 @@ public void testFlushFlowDimsReverse() throws IOException { ); } + public void testFlushFlowWithUnsignedLongDimensions() throws IOException { + List dimList = List.of(0L, -1L, 9223372036854775806L, 4987L, -9223372036854775807L); + List docsWithField = List.of(0, 1, 3, 4, 5); + List dimList2 = List.of(0L, -1L, 2L, 9223372036854775806L, 4987L, -9223372036854775807L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5); + + List metricsList = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0) + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5); + + compositeField = getStarTreeFieldWithUnsignedLongField(); + SortedNumericStarTreeValuesIterator d1sndv = new SortedNumericStarTreeValuesIterator(getSortedNumericMock(dimList, docsWithField)); + SortedNumericStarTreeValuesIterator d2sndv = new SortedNumericStarTreeValuesIterator( + getSortedNumericMock(dimList2, docsWithField2) + ); + SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator( + getSortedNumericMock(metricsList, metricsWithField) + ); + SortedNumericStarTreeValuesIterator m2sndv = new SortedNumericStarTreeValuesIterator( + getSortedNumericMock(metricsList, metricsWithField) + ); + + writeState = getWriteState(6, writeState.segmentInfo.getId()); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService); + SequentialDocValuesIterator[] dimDvs = { new SequentialDocValuesIterator(d1sndv), new SequentialDocValuesIterator(d2sndv) }; + Iterator starTreeDocumentIterator = builder.sortAndAggregateSegmentDocuments( + dimDvs, + List.of(new SequentialDocValuesIterator(m1sndv), new SequentialDocValuesIterator(m2sndv)) + ); + + this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + writeState, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + List starTreeDocuments = builder.getStarTreeDocuments(); + + /* + Asserting following dim / metrics [ dim1, dim2 / Sum [metric], count [metric] ] + [0, 0] | [0.0, 1] + [4987, 4987] | [40.0, 1] + [9223372036854775806, 9223372036854775806] | [30.0, 1] + [-9223372036854775807, -9223372036854775807] | [50.0, 1] + [-1, -1] | [10.0, 1] + [null, 2] | [20.0, 1] + */ + Object[][] expectedSortedDimensions = { + { 0L, 0L }, + { 4987L, 4987L }, + { 9223372036854775806L, 9223372036854775806L }, + { -9223372036854775807L, -9223372036854775807L }, + { -1L, -1L }, + { null, 2L } }; + + double[] expectedSumMetrics = { 0.0, 40.0, 30.0, 50.0, 10.0, 20.0 }; + long expectedCountMetric = 1L; + + int count = 0; + for (StarTreeDocument starTreeDocument : starTreeDocuments) { + if (count < 6) { + assertEquals(expectedSumMetrics[count], starTreeDocument.metrics[0]); + assertEquals(expectedCountMetric, starTreeDocument.metrics[1]); + + Long dim1 = starTreeDocument.dimensions[0]; + Long dim2 = starTreeDocument.dimensions[1]; + assertEquals(expectedSortedDimensions[count][0], dim1); + assertEquals(expectedSortedDimensions[count][1], dim2); + } + count++; + } + assertEquals(13, count); + validateStarTree(builder.getRootNode(), 2, 1000, builder.getStarTreeDocuments()); + 
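+ // Editorial sketch (not part of the original change): the dimension ordering asserted
+ // above follows java.lang.Long.compareUnsigned, which treats the sign bit as the
+ // highest magnitude bit, so every "negative" long sorts after all non-negative values:
+ assert Long.compareUnsigned(0L, 4987L) < 0;
+ assert Long.compareUnsigned(9223372036854775806L, -9223372036854775807L) < 0;
+ assert Long.compareUnsigned(-9223372036854775807L, -1L) < 0;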
+ metaOut.close(); + dataOut.close(); + docValuesConsumer.close(); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG)); + docValues.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG)); + StarTreeMetadata starTreeMetadata = new StarTreeMetadata( + "sf", + STAR_TREE, + mock(IndexInput.class), + VERSION_CURRENT, + builder.numStarTreeNodes, + docValues, + List.of(new Metric("field2", List.of(MetricStat.SUM, MetricStat.VALUE_COUNT, MetricStat.AVG))), + 6, + builder.numStarTreeDocs, + 1000, + Set.of(), + getBuildMode(), + 0, + 264 + ); + + // validateStarTreeFileFormats( + // builder.getRootNode(), + // builder.getStarTreeDocuments().size(), + // starTreeMetadata, + // builder.getStarTreeDocuments() + // ); + + // TODO: Fix this post 2.19 [Handling search for unsigned-long as part of star-tree] + } + public void testFlushFlowBuild() throws IOException { List dimList = new ArrayList<>(100); List docsWithField = new ArrayList<>(100); @@ -337,9 +455,9 @@ public void testFlushFlowBuild() throws IOException { dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 100, 1, 6699); validateStarTreeFileFormats( @@ -496,9 +614,9 @@ public void testFlushFlowForKeywords() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap docValues = new LinkedHashMap<>(); - docValues.put("field1", DocValuesType.SORTED_SET); - docValues.put("field3", DocValuesType.SORTED_SET); + LinkedHashMap docValues = new LinkedHashMap<>(); + docValues.put("field1", new DimensionConfig(DocValuesType.SORTED_SET, DimensionDataType.LONG)); + docValues.put("field3", new DimensionConfig(DocValuesType.SORTED_SET, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = new StarTreeMetadata( "sf", STAR_TREE, @@ -537,6 +655,18 @@ private StarTreeField getStarTreeFieldWithMultipleMetrics() { return new StarTreeField("sf", dims, metrics, c); } + private StarTreeField getStarTreeFieldWithUnsignedLongField() { + Dimension d1 = new UnsignedLongDimension("field1"); + Dimension d2 = new UnsignedLongDimension("field3"); + Metric m1 = new Metric("field2", List.of(MetricStat.SUM)); + Metric m2 = new Metric("field2", List.of(MetricStat.VALUE_COUNT)); + Metric m3 = new Metric("field2", List.of(MetricStat.AVG)); + List dims = List.of(d1, d2); + List metrics = List.of(m1, m2, m3); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(1000, new HashSet<>(), getBuildMode()); + return new StarTreeField("sf", dims, metrics, c); + } + private StarTreeField getStarTreeFieldWithKeywordField(boolean isIp) { Dimension d1 = isIp ? new IpDimension("field1") : new OrdinalDimension("field1"); Dimension d2 = isIp ? 
new IpDimension("field3") : new OrdinalDimension("field3"); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java index 74ecff04076b1..3ed2881b72490 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderMergeFlowTests.java @@ -19,12 +19,15 @@ import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; import org.opensearch.index.compositeindex.CompositeIndexConstants; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.DimensionDataType; import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.ReadDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; @@ -94,13 +97,6 @@ public void testMergeFlow() throws IOException { docsWithField4.add(i); } - List dimList5 = new ArrayList<>(1000); - List docsWithField5 = new ArrayList<>(1000); - for (int i = 0; i < 1000; i++) { - dimList5.add((long) i); - docsWithField5.add(i); - } - List metricsList = new ArrayList<>(1000); List metricsWithField = new ArrayList<>(1000); for (int i = 0; i < 1000; i++) { @@ -119,7 +115,6 @@ public void testMergeFlow() throws IOException { Dimension d2 = new NumericDimension("field3"); Dimension d3 = new NumericDimension("field5"); Dimension d4 = new NumericDimension("field8"); - // Dimension d5 = new NumericDimension("field5"); Metric m1 = new Metric("field2", List.of(MetricStat.SUM, MetricStat.AVG, MetricStat.VALUE_COUNT)); Metric m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); List dims = List.of(d1, d2, d3, d4); @@ -216,12 +211,16 @@ public void testMergeFlow() throws IOException { ... 
[999, 999, 999, 999] | [19980.0] */ + builder.appendDocumentsToStarTree(starTreeDocumentIterator); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + int count = 0; for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { - assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]); - assertEquals(2L, starTreeDocument.metrics[1]); + if (count < 1000) { + assertEquals(starTreeDocument.dimensions[0] * 20.0, starTreeDocument.metrics[0]); + assertEquals(2L, starTreeDocument.metrics[2]); + } + count++; } - builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); - // Validate the star tree structure validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); @@ -244,6 +243,195 @@ public void testMergeFlow() throws IOException { ); } + public void testMergeFlowForUnsignedLong() throws IOException { + int numDocs = 1000; + List dimList1 = new ArrayList<>(numDocs); + List docsWithField1 = new ArrayList<>(numDocs); + for (int i = 0; i < numDocs; i++) { + dimList1.add((long) (i % 2 == 0 ? i : -i)); + docsWithField1.add(i); + } + + List dimList2 = new ArrayList<>(numDocs); + List docsWithField2 = new ArrayList<>(numDocs); + for (int i = 0; i < numDocs; i++) { + dimList2.add((long) (i % 2 == 0 ? i : -i)); + docsWithField2.add(i); + } + + List dimList3 = new ArrayList<>(numDocs); + List docsWithField3 = new ArrayList<>(numDocs); + for (int i = 0; i < numDocs; i++) { + dimList3.add((long) (i % 2 == 0 ? i : -i)); + docsWithField3.add(i); + } + + List dimList4 = new ArrayList<>(numDocs); + List docsWithField4 = new ArrayList<>(numDocs); + for (int i = 0; i < numDocs; i++) { + dimList4.add((long) (i % 2 == 0 ? i : -i)); + docsWithField4.add(i); + } + + List metricsList = new ArrayList<>(1000); + List metricsWithField = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + metricsList.add(getLongFromDouble(i * 10.0)); + metricsWithField.add(i); + } + + List metricsListValueCount = new ArrayList<>(1000); + List metricsWithFieldValueCount = new ArrayList<>(1000); + for (int i = 0; i < 1000; i++) { + metricsListValueCount.add((long) i); + metricsWithFieldValueCount.add(i); + } + + Dimension d1 = new ReadDimension("field1", DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG); + Dimension d2 = new ReadDimension("field3", DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG); + Dimension d3 = new ReadDimension("field5", DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG); + Dimension d4 = new ReadDimension("field8", DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG); + + Metric m1 = new Metric("field2", List.of(MetricStat.SUM, MetricStat.AVG, MetricStat.VALUE_COUNT)); + Metric m2 = new Metric("_doc_count", List.of(MetricStat.DOC_COUNT)); + + List dims = List.of(d1, d2, d3, d4); + List metrics = List.of(m1, m2); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(1, new HashSet<>(), getBuildMode()); + compositeField = new StarTreeField("sf", dims, metrics, c); + SortedNumericDocValues d1sndv = getSortedNumericMock(dimList1, docsWithField1); + SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues d3sndv = getSortedNumericMock(dimList3, docsWithField3); + SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4); + SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues valucountsndv = 
getSortedNumericMock(metricsListValueCount, metricsWithFieldValueCount); + SortedNumericDocValues m2sndv = DocValues.emptySortedNumeric(); + Map> dimDocIdSetIterators = Map.of( + "field1", + () -> new SortedNumericStarTreeValuesIterator(d1sndv), + "field3", + () -> new SortedNumericStarTreeValuesIterator(d2sndv), + "field5", + () -> new SortedNumericStarTreeValuesIterator(d3sndv), + "field8", + () -> new SortedNumericStarTreeValuesIterator(d4sndv) + ); + + Map> metricDocIdSetIterators = Map.of( + "sf_field2_sum_metric", + () -> new SortedNumericStarTreeValuesIterator(m1sndv), + "sf_field2_value_count_metric", + () -> new SortedNumericStarTreeValuesIterator(valucountsndv), + "sf__doc_count_doc_count_metric", + () -> new SortedNumericStarTreeValuesIterator(m2sndv) + ); + + StarTreeValues starTreeValues = new StarTreeValues( + compositeField, + null, + dimDocIdSetIterators, + metricDocIdSetIterators, + getAttributes(1000), + null + ); + + SortedNumericDocValues f2d1sndv = getSortedNumericMock(dimList1, docsWithField1); + SortedNumericDocValues f2d2sndv = getSortedNumericMock(dimList2, docsWithField2); + SortedNumericDocValues f2d3sndv = getSortedNumericMock(dimList3, docsWithField3); + SortedNumericDocValues f2d4sndv = getSortedNumericMock(dimList4, docsWithField4); + SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField); + SortedNumericDocValues f2valucountsndv = getSortedNumericMock(metricsListValueCount, metricsWithFieldValueCount); + SortedNumericDocValues f2m2sndv = DocValues.emptySortedNumeric(); + Map> f2dimDocIdSetIterators = Map.of( + "field1", + () -> new SortedNumericStarTreeValuesIterator(f2d1sndv), + "field3", + () -> new SortedNumericStarTreeValuesIterator(f2d2sndv), + "field5", + () -> new SortedNumericStarTreeValuesIterator(f2d3sndv), + "field8", + () -> new SortedNumericStarTreeValuesIterator(f2d4sndv) + ); + + Map> f2metricDocIdSetIterators = Map.of( + "sf_field2_sum_metric", + () -> new SortedNumericStarTreeValuesIterator(f2m1sndv), + "sf_field2_value_count_metric", + () -> new SortedNumericStarTreeValuesIterator(f2valucountsndv), + "sf__doc_count_doc_count_metric", + () -> new SortedNumericStarTreeValuesIterator(f2m2sndv) + ); + StarTreeValues starTreeValues2 = new StarTreeValues( + compositeField, + null, + f2dimDocIdSetIterators, + f2metricDocIdSetIterators, + getAttributes(1000), + null + ); + + this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + writeState, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + [0, 0, 0, 0] | [0.0, 2] + [-1, -1, -1, -1] | [20.0, 2] + [2, 2, 2, 2] | [40.0, 2] + [-3, -3, -3, -3] | [60.0, 2] + [4, 4, 4, 4] | [80.0, 2] + [-5, -5, -5, -5] | [100.0, 2] + ... 
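+ (values are shown in segment doc-id order; the unsigned sort applied during
+ build() reorders them so all non-negative dimension values come first, followed
+ by the negative longs, which compare as large unsigned values)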
+ */ + builder.appendDocumentsToStarTree(starTreeDocumentIterator); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + int count = 0; + List actualDimensionValues = new ArrayList<>(numDocs); + for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { + if (count < 1000) { + actualDimensionValues.add(starTreeDocument.dimensions[0]); + } + count++; + } + + List expectedDimensionValues = new ArrayList<>(1000); + for (int i = 0; i < numDocs; i++) { + if (i <= 499) { + expectedDimensionValues.add((long) i * 2); // Positive even numbers + } else { + expectedDimensionValues.add((long) -(numDocs - i) * 2 + 1); // Negative odd numbers in decreasing order + } + } + assertEquals(expectedDimensionValues, actualDimensionValues); + + // Validate the star tree structure + validateStarTree(builder.getRootNode(), 4, 1, builder.getStarTreeDocuments()); + + metaOut.close(); + dataOut.close(); + docValuesConsumer.close(); + + StarTreeMetadata starTreeMetadata = getStarTreeMetadata( + getStarTreeDimensionNames(compositeField.getDimensionsOrder()), + 1000, + compositeField.getStarTreeConfig().maxLeafDocs(), + 132165 + ); + + // validateStarTreeFileFormats( + // builder.getRootNode(), + // builder.getStarTreeDocuments().size(), + // starTreeMetadata, + // builder.getStarTreeDocuments() + // ); + // TODO: Fix this post 2.19 [Handling search for unsigned-long as part of star-tree] + } + public void testMergeFlow_randomNumberTypes() throws Exception { DocumentMapper documentMapper = mock(DocumentMapper.class); @@ -352,9 +540,95 @@ public void testMergeFlowWithSum() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); + + validateStarTreeFileFormats( + builder.getRootNode(), + builder.getStarTreeDocuments().size(), + starTreeMetadata, + builder.getStarTreeDocuments() + ); + } + + public void testMergeFlowForUnsignedLongWithSum() throws IOException { + List dimList = List.of(0L, 1L, 3L, 4L, 5L, 6L); + List docsWithField = List.of(0, 1, 3, 4, 5, 6); + List dimList2 = List.of(0L, 1L, 2L, 3L, 4L, 5L, -1L); + List docsWithField2 = List.of(0, 1, 2, 3, 4, 5, 6); + + List metricsList = List.of( + getLongFromDouble(0.0), + getLongFromDouble(10.0), + getLongFromDouble(20.0), + getLongFromDouble(30.0), + getLongFromDouble(40.0), + getLongFromDouble(50.0), + getLongFromDouble(60.0) + ); + List metricsWithField = List.of(0, 1, 2, 3, 4, 5, 6); + + compositeField = getStarTreeFieldForUnsignedLong(MetricStat.SUM); + StarTreeValues starTreeValues = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + compositeField, + "6" + ); + + StarTreeValues starTreeValues2 = getStarTreeValues( + getSortedNumericMock(dimList, docsWithField), + getSortedNumericMock(dimList2, docsWithField2), + getSortedNumericMock(metricsList, metricsWithField), + compositeField, + "6" + ); + writeState = getWriteState(6, writeState.segmentInfo.getId()); + this.docValuesConsumer = 
LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec( + writeState, + Composite912DocValuesFormat.DATA_DOC_VALUES_CODEC, + Composite912DocValuesFormat.DATA_DOC_VALUES_EXTENSION, + Composite912DocValuesFormat.META_DOC_VALUES_CODEC, + Composite912DocValuesFormat.META_DOC_VALUES_EXTENSION + ); + builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService); + Iterator starTreeDocumentIterator = builder.mergeStarTrees(List.of(starTreeValues, starTreeValues2)); + /** + * Asserting following dim / metrics [ dim1, dim2 / Sum [ metric] ] + * [0, 0] | [0.0] + * [1, 1] | [20.0] + * [3, 3] | [60.0] + * [4, 4] | [80.0] + * [5, 5] | [100.0] + * [null, 2] | [40.0] + * ------------------ We only take non-star docs + * [6,-1] | [120.0] + */ + builder.appendDocumentsToStarTree(starTreeDocumentIterator); + assertEquals(6, builder.getStarTreeDocuments().size()); + builder.build(starTreeDocumentIterator, new AtomicInteger(), docValuesConsumer); + int count = 0; + for (StarTreeDocument starTreeDocument : builder.getStarTreeDocuments()) { + count++; + if (count <= 6) { + assertEquals( + starTreeDocument.dimensions[0] != null ? starTreeDocument.dimensions[0] * 2 * 10.0 : 40.0, + starTreeDocument.metrics[0] + ); + } + } + + validateStarTree(builder.getRootNode(), 2, 1000, builder.getStarTreeDocuments()); + + metaOut.close(); + dataOut.close(); + docValuesConsumer.close(); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.UNSIGNED_LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( @@ -427,9 +701,9 @@ public void testMergeFlowWithCount() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( @@ -576,9 +850,9 @@ public void testMergeFlowWithMissingDocs() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( @@ -666,9 +940,9 @@ public void testMergeFlowWithMissingDocsWithZero() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, 
DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 231); validateStarTreeFileFormats( @@ -759,9 +1033,9 @@ public void testMergeFlowWithMissingDocsWithZeroComplexCase() throws IOException metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 7, 1000, 231); validateStarTreeFileFormats( @@ -848,9 +1122,9 @@ public void testMergeFlowWithMissingDocsInSecondDim() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( @@ -935,9 +1209,9 @@ public void testMergeFlowWithDocsMissingAtTheEnd() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 10, 1000, 363); validateStarTreeFileFormats( @@ -1010,9 +1284,9 @@ public void testMergeFlowWithEmptyFieldsInOneSegment() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 6, 1000, 264); validateStarTreeFileFormats( @@ -1411,9 +1685,9 @@ public void testMergeFlowWithDifferentDocsFromSegments() throws IOException { metaOut.close(); dataOut.close(); docValuesConsumer.close(); - LinkedHashMap map = new LinkedHashMap<>(); - map.put("field1", DocValuesType.SORTED_NUMERIC); - map.put("field3", DocValuesType.SORTED_NUMERIC); + LinkedHashMap map = new LinkedHashMap<>(); + map.put("field1", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); + map.put("field3", new DimensionConfig(DocValuesType.SORTED_NUMERIC, DimensionDataType.LONG)); StarTreeMetadata starTreeMetadata = getStarTreeMetadata(map, 9, 1000, 330); validateStarTreeFileFormats( diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java 
b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java index 843a79d0877ce..7dd4f59312c82 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeBuilderTestCase.java @@ -26,6 +26,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.Numbers; import org.opensearch.common.Rounding; import org.opensearch.common.settings.Settings; import org.opensearch.index.codec.composite.composite912.Composite912DocValuesFormat; @@ -38,9 +39,11 @@ import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.compositeindex.datacube.UnsignedLongDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeDocument; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.DimensionConfig; import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; @@ -249,7 +252,7 @@ SegmentWriteState getWriteState(int numDocs, byte[] id) { return BuilderTestsUtils.getWriteState(numDocs, id, fieldsInfo, directory); } - SegmentReadState getReadState(int numDocs, Map dimensionFields, List metrics) { + SegmentReadState getReadState(int numDocs, Map dimensionFields, List metrics) { return BuilderTestsUtils.getReadState(numDocs, dimensionFields, metrics, compositeField, writeState, directory); } @@ -257,11 +260,11 @@ protected Map getAttributes(int numSegmentDocs) { return Map.of(CompositeIndexConstants.SEGMENT_DOCS_COUNT, String.valueOf(numSegmentDocs)); } - protected LinkedHashMap getStarTreeDimensionNames(List dimensionsOrder) { - LinkedHashMap dimensionNames = new LinkedHashMap<>(); + protected LinkedHashMap getStarTreeDimensionNames(List dimensionsOrder) { + LinkedHashMap dimensionNames = new LinkedHashMap<>(); for (Dimension dimension : dimensionsOrder) { for (String dimensionName : dimension.getSubDimensionNames()) { - dimensionNames.put(dimensionName, dimension.getDocValuesType()); + dimensionNames.put(dimensionName, new DimensionConfig(dimension.getDocValuesType(), dimension.getDimensionDataType())); } } return dimensionNames; @@ -277,6 +280,16 @@ protected StarTreeField getStarTreeField(MetricStat count) { return new StarTreeField("sf", dims, metrics, c); } + protected StarTreeField getStarTreeFieldForUnsignedLong(MetricStat count) { + Dimension d1 = new UnsignedLongDimension("field1"); + Dimension d2 = new UnsignedLongDimension("field3"); + Metric m1 = new Metric("field2", List.of(count)); + List dims = List.of(d1, d2); + List metrics = List.of(m1); + StarTreeFieldConfiguration c = new StarTreeFieldConfiguration(1000, new HashSet<>(), getBuildMode()); + return new StarTreeField("sf", dims, metrics, c); + } + protected StarTreeField getStarTreeFieldWithDocCount(int maxLeafDocs, boolean includeDocCountMetric) { 
Dimension d1 = new NumericDimension("field1"); Dimension d2 = new NumericDimension("field3"); @@ -327,12 +340,66 @@ protected static List getExpectedStarTreeDocumentIterator() { ); } + protected static List getExpectedStarTreeDocumentIteratorForUnsignedLong() { + return List.of( + new StarTreeDocument(new Long[] { 2L, 4L, 3L, 4L }, new Object[] { 21.0, 14.0, 2L, 8.0, Numbers.unsignedLongToDouble(-1), 2L }), + new StarTreeDocument( + new Long[] { 3L, 4L, 2L, 1L }, + new Object[] { + 20.0 + Numbers.unsignedLongToDouble(-2L), + 28.0 + Numbers.unsignedLongToDouble(-9223372036854775808L), + 3L, + 6.0, + 24.0, + 3L } + ), + new StarTreeDocument( + new Long[] { null, 4L, 2L, 1L }, + new Object[] { + 20.0 + Numbers.unsignedLongToDouble(-2L), + 28.0 + Numbers.unsignedLongToDouble(-9223372036854775808L), + 3L, + 6.0, + 24.0, + 3L } + ), + new StarTreeDocument( + new Long[] { null, 4L, 3L, 4L }, + new Object[] { 21.0, 14.0, 2L, 8.0, Numbers.unsignedLongToDouble(-1), 2L } + ), + new StarTreeDocument( + new Long[] { null, 4L, null, 1L }, + new Object[] { + 20.0 + Numbers.unsignedLongToDouble(-2L), + 28.0 + Numbers.unsignedLongToDouble(-9223372036854775808L), + 3L, + 6.0, + 24.0, + 3L } + ), + new StarTreeDocument( + new Long[] { null, 4L, null, 4L }, + new Object[] { 21.0, 14.0, 2L, 8.0, Numbers.unsignedLongToDouble(-1), 2L } + ), + new StarTreeDocument( + new Long[] { null, 4L, null, null }, + new Object[] { + 46.0 + Numbers.unsignedLongToDouble(-2L), + 42.0 + Numbers.unsignedLongToDouble(-9223372036854775808L), + 5L, + 6.0, + Numbers.unsignedLongToDouble(-1), + 5L } + ) + ); + } + protected long getLongFromDouble(double value) { return NumericUtils.doubleToSortableLong(value); } protected StarTreeMetadata getStarTreeMetadata( - LinkedHashMap fields, + LinkedHashMap fields, int segmentAggregatedDocCount, int maxLeafDocs, int dataLength diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java index 51dee33662290..5622864c53ada 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/meta/StarTreeMetadataTests.java @@ -50,6 +50,7 @@ import static org.opensearch.index.compositeindex.CompositeIndexConstants.COMPOSITE_FIELD_MARKER; import static org.opensearch.index.compositeindex.datacube.startree.fileformats.StarTreeWriter.VERSION_CURRENT; import static org.opensearch.index.mapper.CompositeMappedFieldType.CompositeFieldType.STAR_TREE; +import static org.junit.Assert.assertEquals; public class StarTreeMetadataTests extends OpenSearchTestCase { @@ -184,15 +185,16 @@ public void test_starTreeMetadata() throws IOException { assertEquals(starTreeMetadata.getNumberOfNodes(), numberOfNodes); assertNotNull(starTreeMetadata); - assertEquals(dimensionsOrder.size(), starTreeMetadata.dimensionFieldsToDocValuesMap.size()); - int k = 0; - for (Map.Entry entry : starTreeMetadata.dimensionFieldsToDocValuesMap.entrySet()) { - assertEquals(dimensionsOrder.get(k).getField(), entry.getKey()); - k++; + assertEquals(dimensionsOrder.size(), starTreeMetadata.getDimensionFields().size()); + int index = 0; + for (Map.Entry entry : starTreeMetadata.getDimensionFields().entrySet()) { + Dimension dimension = dimensionsOrder.get(index++); + assertEquals(dimension.getField(), 
entry.getKey()); + assertEquals(dimension.getDocValuesType(), entry.getValue().getDocValuesType()); + assertEquals(dimension.getDimensionDataType(), entry.getValue().getDimensionDataType()); } assertEquals(starTreeField.getMetrics().size(), starTreeMetadata.getMetrics().size()); - for (int i = 0; i < starTreeField.getMetrics().size(); i++) { Metric expectedMetric = starTreeField.getMetrics().get(i); diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java index b485ea1a4fe3e..ce1d36024524b 100644 --- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java +++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeDocumentsSorterTests.java @@ -9,52 +9,140 @@ package org.opensearch.index.compositeindex.datacube.startree.utils; import org.opensearch.common.Randomness; +import org.opensearch.common.Rounding; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.UnsignedLongDimension; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; +import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; /** * Tests for {@link StarTreeDocumentsSorter}. 
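+ * The sorter is now driven by an explicit per-dimension comparator list, so mixed
+ * date, numeric, and unsigned-long dimensions can be ordered in a single pass.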
*/ public class StarTreeDocumentsSorterTests extends OpenSearchTestCase { + private Map testData; + private List> comparatorList; @Before public void setUp() throws Exception { super.setUp(); testData = new HashMap<>(); - testData.put(0, new Long[] { -1L, 2L, 3L }); - testData.put(1, new Long[] { 1L, 2L, 2L }); - testData.put(2, new Long[] { -1L, -1L, 3L }); - testData.put(3, new Long[] { 1L, 2L, null }); - testData.put(4, new Long[] { 1L, null, 3L }); + comparatorList = new ArrayList<>(); + + List intervals = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.YEAR_OF_CENTURY), + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MONTH_OF_YEAR), + new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH), + new DateTimeUnitAdapter(Rounding.DateTimeUnit.HOUR_OF_DAY) + ); + DateDimension dateDimension = new DateDimension("timestamp", intervals, DateFieldMapper.Resolution.MILLISECONDS); + Long[] date_dims = new Long[4]; + Long testValue = 1609459200000L; // 2021-01-01 00:00:00 UTC + AtomicInteger dimIndex = new AtomicInteger(0); + dateDimension.setDimensionValues(testValue, value -> { date_dims[dimIndex.getAndIncrement()] = value; }); + + // 10 documents with 6 dimensions each + testData.put(0, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], null, 150L, 100L, 300L, null }); + testData.put(1, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 1L, null, -9223372036854775807L, 200L, 300L }); + testData.put(2, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 2L, -100L, -15L, 250L, null }); + testData.put( + 3, + new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 2L, -100L, -10L, 210L, -9223372036854775807L } + ); + testData.put(4, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 1L, 120L, null, null, 305L }); + testData.put(5, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 2L, 150L, -5L, 200L, 295L }); + testData.put(6, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 3L, 105L, null, -200L, -315L }); + testData.put(7, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 1L, 120L, -10L, 205L, 310L }); + testData.put( + 8, + new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], null, -100L, 9223372036854775807L, 200L, -300L } + ); + testData.put(9, new Long[] { date_dims[0], date_dims[1], date_dims[2], date_dims[3], 2L, null, -10L, 210L, 325L }); + + comparatorList.addAll(Collections.nCopies(4, dateDimension.comparator())); + comparatorList.add(new NumericDimension("dim1").comparator()); + comparatorList.add(new UnsignedLongDimension("dim2").comparator()); + comparatorList.add(new NumericDimension("dim3").comparator()); + comparatorList.add(new UnsignedLongDimension("dim4").comparator()); + comparatorList.add(new NumericDimension("dim5").comparator()); + } - public void testSortDocumentsOffHeap_FirstDimension() { - int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + public void testSortDocumentsOffHeap_StartFromFirstDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; int dimensionId = -1; - int numDocs = 5; + int numDocs = 10; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 7, 4, 1, 5, 2, 3, 9, 6, 0, 8 }, sortedDocIds); + } - assertArrayEquals(new int[] { 2, 0, 1, 3, 4 }, sortedDocIds); + public void 
testSortDocumentsOffHeap_StartFromSecondDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + int dimensionId = 0; + int numDocs = 10; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 7, 4, 1, 5, 2, 3, 9, 6, 0, 8 }, sortedDocIds); } - public void testSortDocumentsOffHeap_ThirdDimension() { - int[] sortedDocIds = { 0, 1, 2, 3, 4 }; + public void testSortDocumentsOffHeap_StartFromThirdDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; int dimensionId = 1; - int numDocs = 5; + int numDocs = 10; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 7, 4, 1, 5, 2, 3, 9, 6, 0, 8 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_StartFromFourthDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + int dimensionId = 2; + int numDocs = 10; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 7, 4, 1, 5, 2, 3, 9, 6, 0, 8 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_StartFromFifthDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + int dimensionId = 3; + int numDocs = 10; + + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 7, 4, 1, 5, 2, 3, 9, 6, 0, 8 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_StartFromSixthDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + int dimensionId = 4; + int numDocs = 10; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 6, 7, 4, 5, 0, 2, 3, 8, 1, 9 }, sortedDocIds); + } + + public void testSortDocumentsOffHeap_StartFromSeventhDimension() { + int[] sortedDocIds = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }; + int dimensionId = 5; + int numDocs = 10; - assertArrayEquals(new int[] { 1, 0, 2, 4, 3 }, sortedDocIds); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 1, 2, 7, 3, 9, 5, 0, 8, 6, 4 }, sortedDocIds); } public void testSortDocumentsOffHeap_SingleElement() { @@ -62,8 +150,7 @@ public void testSortDocumentsOffHeap_SingleElement() { int dimensionId = -1; int numDocs = 1; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); - + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); assertArrayEquals(new int[] { 0 }, sortedDocIds); } @@ -72,32 +159,21 @@ public void testSortDocumentsOffHeap_EmptyArray() { int dimensionId = -1; int numDocs = 0; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); - + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); assertArrayEquals(new int[] {}, sortedDocIds); } - public void testSortDocumentsOffHeap_SecondDimensionId() { - int[] sortedDocIds = { 0, 1, 2, 3, 4 }; - int dimensionId = 0; - int numDocs = 5; - - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, 
i -> testData.get(sortedDocIds[i])); - - assertArrayEquals(new int[] { 2, 1, 0, 3, 4 }, sortedDocIds); - } - public void testSortDocumentsOffHeap_AllNulls() { Map testData = new HashMap<>(); - testData.put(0, new Long[] { null, null, null }); - testData.put(1, new Long[] { null, null, null }); - testData.put(2, new Long[] { null, null, null }); + testData.put(0, new Long[] { null, null, null, null, null, null, null, null, null }); + testData.put(1, new Long[] { null, null, null, null, null, null, null, null, null }); + testData.put(2, new Long[] { null, null, null, null, null, null, null, null, null }); int[] sortedDocIds = { 0, 1, 2 }; int dimensionId = -1; int numDocs = 3; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); // The order should remain unchanged as all elements are equal (null) assertArrayEquals(new int[] { 0, 1, 2 }, sortedDocIds); @@ -105,23 +181,21 @@ public void testSortDocumentsOffHeap_AllNulls() { public void testSortDocumentsOffHeap_Negatives() { Map testData = new HashMap<>(); - testData.put(0, new Long[] { -10L, 0L }); - testData.put(1, new Long[] { -9L, 0L }); - testData.put(2, new Long[] { -8L, 0L }); - testData.put(3, new Long[] { -7L, -0L }); - testData.put(4, new Long[] { -15L, -0L }); + testData.put(0, new Long[] { -1L, -2L, -3L, -4L, -10L, 0L, null, 0L, -5L }); + testData.put(1, new Long[] { -5L, -2L, -3L, -4L, -9L, 0L, null, 0L, -10L }); + testData.put(2, new Long[] { -5L, -3L, -3L, -4L, -9L, 0L, null, 0L, 15L }); + testData.put(3, new Long[] { -9L, -2L, -3L, -4L, -7L, 0L, null, 0L, -20L }); + testData.put(4, new Long[] { -8L, -2L, -3L, -4L, -15L, 0L, null, 0L, -25L }); int[] sortedDocIds = { 0, 1, 2, 3, 4 }; int dimensionId = -1; int numDocs = 5; - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); - - // The order should remain unchanged as all elements are equal (null) - assertArrayEquals(new int[] { 4, 0, 1, 2, 3 }, sortedDocIds); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); + assertArrayEquals(new int[] { 3, 4, 2, 1, 0 }, sortedDocIds); } - public void testRandomSort() { + public void testTheRandomSort() { int i = 0; while (i < 10) { testRandomizedSort(); @@ -156,9 +230,16 @@ private void testRandomizedSort() { // sort dimensionId + 1 to numDimensions // for example to start from dimension in 0th index, we need to pass -1 to sort method int dimensionId = random.nextInt(numDimensions) - 1; + List> comparatorList = new ArrayList<>(); + + for (int i = 0; i < numDimensions; i++) { + Boolean isUnsignedLong = random.nextBoolean(); + if (!isUnsignedLong) comparatorList.add(new NumericDimension("fieldName").comparator()); + else comparatorList.add(new UnsignedLongDimension("fieldName").comparator()); + } // Sort using StarTreeDocumentsSorter - StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i])); + StarTreeDocumentsSorter.sort(sortedDocIds, dimensionId, numDocs, i -> testData.get(sortedDocIds[i]), comparatorList); // Verify the sorting for (int i = 1; i < numDocs; i++) { @@ -166,7 +247,7 @@ private void testRandomizedSort() { Long[] curr = testData.get(sortedDocIds[i]); boolean isCorrectOrder = true; for (int j = dimensionId + 1; j < numDimensions; j++) { - int comparison = compareLongs(prev[j], curr[j]); + int 
comparison = comparatorList.get(j).compare(prev[j], curr[j]); if (comparison < 0) { break; } else if (comparison > 0) { @@ -186,16 +267,4 @@ private void testRandomizedSort() { } } - private int compareLongs(Long a, Long b) { - if (!Objects.equals(a, b)) { - if (a == null) { - return 1; - } else if (b == null) { - return -1; - } else { - return a.compareTo(b); - } - } - return 0; - } } diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 684704ad65b0a..78edd7c6fe4e7 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -25,6 +25,7 @@ import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.compositeindex.datacube.ReadDimension; +import org.opensearch.index.compositeindex.datacube.UnsignedLongDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; @@ -77,7 +78,7 @@ public void testValidStarTree() throws IOException { Set compositeFieldTypes = mapperService.getCompositeFieldTypes(); for (CompositeMappedFieldType type : compositeFieldTypes) { StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; - assertEquals(2, starTreeFieldType.getDimensions().size()); + assertEquals(3, starTreeFieldType.getDimensions().size()); assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); @@ -89,6 +90,11 @@ public void testValidStarTree() throws IOException { assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getIntervals().get(i).shortName()); } assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension); + + assertEquals("unsignedLongDimension", starTreeFieldType.getDimensions().get(2).getField()); + assertTrue(starTreeFieldType.getDimensions().get(2) instanceof UnsignedLongDimension); + assertEquals(2, starTreeFieldType.getMetrics().size()); assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); @@ -154,6 +160,11 @@ public void testMetricsWithJustSum() throws IOException { assertEquals(new DateTimeUnitAdapter(expectedTimeUnits.get(i)), dateDim.getIntervals().get(i)); } assertEquals("status", starTreeFieldType.getDimensions().get(1).getField()); + assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension); + + assertEquals("unsignedLongDimension", starTreeFieldType.getDimensions().get(2).getField()); + assertTrue(starTreeFieldType.getDimensions().get(2) instanceof UnsignedLongDimension); + assertEquals("size", starTreeFieldType.getMetrics().get(0).getField()); // Assert AVG gets added when both of its base metrics is already present @@ -162,7 +173,7 @@ public void testMetricsWithJustSum() throws IOException { assertEquals(100, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); 
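// The sorter tests above pin down the comparator semantics per dimension type:
// nulls always sort last, NumericDimension compares as signed longs, and
// UnsignedLongDimension compares as unsigned longs, so -100L (i.e. 2^64 - 100)
// sorts above 150L. A sketch assuming the JDK comparators below match the
// Dimension implementations (same nulls-last behavior as the removed
// compareLongs helper in the signed case):
import java.util.Comparator;

final class DimensionComparatorSketch {
    static final Comparator<Long> SIGNED = Comparator.nullsLast(Long::compare);
    static final Comparator<Long> UNSIGNED = Comparator.nullsLast(Long::compareUnsigned);

    public static void main(String[] args) {
        System.out.println(SIGNED.compare(-100L, 150L) < 0);          // true: signed order
        System.out.println(UNSIGNED.compare(-100L, 150L) > 0);        // true: unsigned order
        System.out.println(SIGNED.compare(null, Long.MAX_VALUE) > 0); // true: nulls last
    }
}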
assertEquals( - new HashSet<>(Arrays.asList("@timestamp", "status")), + new HashSet<>(Arrays.asList("@timestamp", "status", "unsignedLongDimension")), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims() ); } @@ -548,6 +559,7 @@ public void testStarTreeField() { DateDimension d1 = new DateDimension("name", d1CalendarIntervals, DateFieldMapper.Resolution.MILLISECONDS); NumericDimension n1 = new NumericDimension("numeric"); NumericDimension n2 = new NumericDimension("name1"); + UnsignedLongDimension n3 = new UnsignedLongDimension("name2"); List metrics = List.of(metric1); List dims = List.of(d1, n2); @@ -561,6 +573,11 @@ public void testStarTreeField() { StarTreeField field2 = new StarTreeField("starTree", dims, metrics, config); assertEquals(field1, field2); + List dims1 = List.of(d1, n1, n2, n3); + StarTreeField field3 = new StarTreeField("starTree", dims1, metrics, config); + StarTreeField field4 = new StarTreeField("starTree", dims1, metrics, config); + assertEquals(field3, field4); + dims = List.of(d1, n2, n1); field2 = new StarTreeField("starTree", dims, metrics, config); assertNotEquals(field1, field2); @@ -677,6 +694,9 @@ private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) b.startObject(); b.field("name", dim); b.endObject(); + b.startObject(); + b.field("name", "unsignedLongDimension"); // UnsignedLongDimension + b.endObject(); b.endArray(); b.startArray("metrics"); b.startObject(); @@ -702,6 +722,9 @@ private XContentBuilder getExpandedMappingWithJustAvg(String dim, String metric) b.startObject("keyword1"); b.field("type", "keyword"); b.endObject(); + b.startObject("unsignedLongDimension"); + b.field("type", "unsigned_long"); + b.endObject(); b.endObject(); }); } @@ -768,6 +791,7 @@ private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) { b.value("@timestamp"); b.value("status"); + b.value("unsignedLongDimension"); } b.endArray(); b.startObject("date_dimension"); @@ -781,6 +805,9 @@ private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) b.startObject(); b.field("name", dim); b.endObject(); + b.startObject(); + b.field("name", "unsignedLongDimension"); // UnsignedLongDimension + b.endObject(); b.endArray(); b.startArray("metrics"); b.startObject(); @@ -806,6 +833,9 @@ private XContentBuilder getExpandedMappingWithJustSum(String dim, String metric) b.startObject("keyword1"); b.field("type", "keyword"); b.endObject(); + b.startObject("unsignedLongDimension"); + b.field("type", "unsigned_long"); + b.endObject(); b.endObject(); }); } From 32a88eb16bb7bff15fa5430c1490cd102c6cca28 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 28 Jan 2025 08:44:21 -0800 Subject: [PATCH 22/48] Remove MasterService class (#17151) This has been deprecated and is replaced by ClusterManagerService. 
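For downstream callers the migration is mechanical; a hypothetical sketch, using
only accessors visible in the hunks below:

    import org.opensearch.cluster.service.ClusterManagerService;
    import org.opensearch.cluster.service.ClusterService;

    final class MasterServiceMigrationExample {
        // Replaces the removed clusterService.getMasterService() accessor,
        // deprecated since 2.2 in favor of inclusive naming.
        static long throttledPendingTasks(ClusterService clusterService) {
            ClusterManagerService clusterManagerService = clusterService.getClusterManagerService();
            return clusterManagerService.numberOfThrottledPendingTasks();
        }
    }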
Signed-off-by: Andrew Ross
---
 .../ClusterManagerTaskThrottlingIT.java       |    2 +-
 .../service/ClusterManagerService.java        |  991 +++++++++++++++-
 .../cluster/service/ClusterService.java       |   22 -
 .../cluster/service/MasterService.java        | 1050 -----------------
 .../common/settings/ClusterSettings.java      |    1 -
 .../cluster/coordination/NodeJoinTests.java   |   16 +-
 ...s.java => ClusterManagerServiceTests.java} |  169 +--
 .../ClusterManagerTaskThrottlerTests.java     |    4 +-
 .../cluster/service/ClusterServiceTests.java  |   40 -
 .../MasterServiceRenamedSettingTests.java     |   99 --
 .../AbstractCoordinatorTestCase.java          |    2 +-
 .../FakeThreadPoolClusterManagerService.java  |    2 +-
 .../service/FakeThreadPoolMasterService.java  |   28 -
 .../opensearch/test/ClusterServiceUtils.java  |   13 -
 .../BlockMasterServiceOnMaster.java           |   21 -
 .../BusyMasterServiceDisruption.java          |   23 -
 16 files changed, 1087 insertions(+), 1396 deletions(-)
 delete mode 100644 server/src/main/java/org/opensearch/cluster/service/MasterService.java
 rename server/src/test/java/org/opensearch/cluster/service/{MasterServiceTests.java => ClusterManagerServiceTests.java} (92%)
 delete mode 100644 server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java
 delete mode 100644 server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java
 delete mode 100644 test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java
 delete mode 100644 test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java
 delete mode 100644 test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java

diff --git a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java
index 3b80ef7820e08..3fb67997680c0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/clustermanager/ClusterManagerTaskThrottlingIT.java
@@ -86,7 +86,7 @@ public void onFailure(Exception e) {
             assertEquals(totalRequest + throttledRequest.get(), requestCountOnClusterManager.get());
             assertBusy(
-                () -> { assertEquals(clusterService().getMasterService().numberOfThrottledPendingTasks(), throttledRequest.get()); }
+                () -> { assertEquals(clusterService().getClusterManagerService().numberOfThrottledPendingTasks(), throttledRequest.get()); }
             );
         } finally {
             clusterSettingCleanUp();
diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
index fa8c965b4d538..ede0985c2b420 100644
--- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
+++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerService.java
@@ -6,24 +6,130 @@
  * compatible open source license.
  */

+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + package org.opensearch.cluster.service; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.AckedClusterStateTaskListener; +import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterManagerMetrics; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterState.Builder; +import org.opensearch.cluster.ClusterStateTaskConfig; +import org.opensearch.cluster.ClusterStateTaskExecutor; +import org.opensearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; +import org.opensearch.cluster.ClusterStateTaskListener; +import org.opensearch.cluster.coordination.ClusterStatePublisher; +import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.common.Nullable; +import org.opensearch.common.Priority; import org.opensearch.common.annotation.PublicApi; +import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.CountDown; +import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextAccess; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.text.Text; +import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.discovery.Discovery; +import org.opensearch.node.Node; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; +import org.opensearch.telemetry.metrics.tags.Tags; +import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; + /** * Main Cluster Manager Node Service * * @opensearch.api */ @PublicApi(since = "2.2.0") -public class 
ClusterManagerService extends MasterService { +public class ClusterManagerService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(ClusterManagerService.class); + + // The setting below is going to replace the above. + // To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. + public static final Setting CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( + "cluster.service.slow_cluster_manager_task_logging_threshold", + TimeValue.timeValueSeconds(10), + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; + + ClusterStatePublisher clusterStatePublisher; + + private final String nodeName; + + private java.util.function.Supplier clusterStateSupplier; + + private volatile TimeValue slowTaskLoggingThreshold; + + protected final ThreadPool threadPool; + + private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor; + private volatile Batcher taskBatcher; + protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; + private final ClusterManagerThrottlingStats throttlingStats; + private final ClusterStateStats stateStats; + private final ClusterManagerMetrics clusterManagerMetrics; public ClusterManagerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - super(settings, clusterSettings, threadPool); + this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); } public ClusterManagerService( @@ -32,6 +138,885 @@ public ClusterManagerService( ThreadPool threadPool, ClusterManagerMetrics clusterManagerMetrics ) { - super(settings, clusterSettings, threadPool, clusterManagerMetrics); + this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); + + this.slowTaskLoggingThreshold = CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + this::setSlowTaskLoggingThreshold + ); + + this.throttlingStats = new ClusterManagerThrottlingStats(); + this.clusterManagerTaskThrottler = new ClusterManagerTaskThrottler( + settings, + clusterSettings, + this::getMinNodeVersion, + throttlingStats + ); + this.stateStats = new ClusterStateStats(); + this.threadPool = threadPool; + this.clusterManagerMetrics = clusterManagerMetrics; + } + + private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { + this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; + } + + public synchronized void setClusterStatePublisher(ClusterStatePublisher publisher) { + clusterStatePublisher = publisher; + } + + public synchronized void setClusterStateSupplier(java.util.function.Supplier clusterStateSupplier) { + this.clusterStateSupplier = clusterStateSupplier; + } + + @Override + protected synchronized void doStart() { + Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting"); + Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting"); + threadPoolExecutor = createThreadPoolExecutor(); + taskBatcher = new Batcher(logger, threadPoolExecutor, clusterManagerTaskThrottler); + } + + protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { + return OpenSearchExecutors.newSinglePrioritizing( + nodeName + "/" + 
CLUSTER_MANAGER_UPDATE_THREAD_NAME, + daemonThreadFactory(nodeName, CLUSTER_MANAGER_UPDATE_THREAD_NAME), + threadPool.getThreadContext(), + threadPool.scheduler() + ); + } + + @SuppressWarnings("unchecked") + class Batcher extends TaskBatcher { + + Batcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor, TaskBatcherListener taskBatcherListener) { + super(logger, threadExecutor, taskBatcherListener); + } + + @Override + protected void onTimeout(List tasks, TimeValue timeout) { + threadPool.generic() + .execute( + () -> tasks.forEach( + task -> ((UpdateTask) task).listener.onFailure( + task.source, + new ProcessClusterEventTimeoutException(timeout, task.source) + ) + ) + ); + } + + @Override + protected void run(Object batchingKey, List tasks, Function taskSummaryGenerator) { + ClusterStateTaskExecutor taskExecutor = (ClusterStateTaskExecutor) batchingKey; + List updateTasks = (List) tasks; + runTasks(new TaskInputs(taskExecutor, updateTasks, taskSummaryGenerator)); + } + + class UpdateTask extends BatchedTask { + final ClusterStateTaskListener listener; + + UpdateTask( + Priority priority, + String source, + Object task, + ClusterStateTaskListener listener, + ClusterStateTaskExecutor executor + ) { + super(priority, source, executor, task); + this.listener = listener; + } + + @Override + public String describeTasks(List tasks) { + return ((ClusterStateTaskExecutor) batchingKey).describeTasks( + tasks.stream().map(BatchedTask::getTask).collect(Collectors.toList()) + ); + } + } + } + + @Override + protected synchronized void doStop() { + ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS); + } + + @Override + protected synchronized void doClose() {} + + /** + * The current cluster state exposed by the discovery layer. Package-visible for tests. + */ + ClusterState state() { + return clusterStateSupplier.get(); + } + + private static boolean isClusterManagerUpdateThread() { + return Thread.currentThread().getName().contains(CLUSTER_MANAGER_UPDATE_THREAD_NAME); + } + + public static boolean assertClusterManagerUpdateThread() { + assert isClusterManagerUpdateThread() : "not called from the cluster-manager service thread"; + return true; + } + + public static boolean assertNotClusterManagerUpdateThread(String reason) { + assert isClusterManagerUpdateThread() == false : "Expected current thread [" + + Thread.currentThread() + + "] to not be the cluster-manager service thread. 
Reason: [" + + reason + + "]"; + return true; + } + + private void runTasks(TaskInputs taskInputs) { + final String summary; + if (logger.isTraceEnabled()) { + summary = taskInputs.taskSummaryGenerator.apply(true); + } else { + summary = taskInputs.taskSummaryGenerator.apply(false); + } + + if (!lifecycle.started()) { + logger.debug("processing [{}]: ignoring, cluster-manager service not started", summary); + return; + } + + if (logger.isTraceEnabled()) { + logger.trace("executing cluster state update for [{}]", summary); + } else { + logger.debug("executing cluster state update for [{}]", summary); + } + + final ClusterState previousClusterState = state(); + + if (!previousClusterState.nodes().isLocalNodeElectedClusterManager() && taskInputs.runOnlyWhenClusterManager()) { + logger.debug("failing [{}]: local node is no longer cluster-manager", summary); + taskInputs.onNoLongerClusterManager(); + return; + } + + final long computationStartTime = threadPool.preciseRelativeTimeInNanos(); + final TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState, summary); + taskOutputs.notifyFailedTasks(); + final TimeValue computationTime = getTimeSince(computationStartTime); + logExecutionTime(computationTime, "compute cluster state update", summary); + + clusterManagerMetrics.recordLatency( + clusterManagerMetrics.clusterStateComputeHistogram, + (double) computationTime.getMillis(), + Optional.of(Tags.create().addTag("Operation", taskInputs.executor.getClass().getSimpleName())) + ); + + if (taskOutputs.clusterStateUnchanged()) { + final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); + taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); + final TimeValue executionTime = getTimeSince(notificationStartTime); + logExecutionTime(executionTime, "notify listeners on unchanged cluster state", summary); + } else { + final ClusterState newClusterState = taskOutputs.newClusterState; + if (logger.isTraceEnabled()) { + logger.trace("cluster state updated, source [{}]\n{}", summary, newClusterState); + } else { + logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); + } + final long publicationStartTime = threadPool.preciseRelativeTimeInNanos(); + try { + ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(summary, newClusterState, previousClusterState); + // new cluster state, notify all listeners + final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); + if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { + String nodesDeltaSummary = nodesDelta.shortSummary(); + if (nodesDeltaSummary.length() > 0) { + logger.info( + "{}, term: {}, version: {}, delta: {}", + summary, + newClusterState.term(), + newClusterState.version(), + nodesDeltaSummary + ); + } + } + + logger.debug("publishing cluster state version [{}]", newClusterState.version()); + publish(clusterChangedEvent, taskOutputs, publicationStartTime); + } catch (Exception e) { + handleException(summary, publicationStartTime, newClusterState, e); + } + } + } + + private TimeValue getTimeSince(long startTimeNanos) { + return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); + } + + protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { + final PlainActionFuture fut = new PlainActionFuture() { + @Override + protected boolean blockingAllowed() { + return isClusterManagerUpdateThread() || super.blockingAllowed(); + 
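// The override above lets the single cluster-manager update thread block on the
// publication future, serializing state updates. A toy illustration of the same
// wait-on-future pattern (values hypothetical; classes as used in this file):
import org.opensearch.action.support.PlainActionFuture;
import org.opensearch.common.util.concurrent.FutureUtils;

final class BlockingPublishSketch {
    static String awaitPublication() {
        PlainActionFuture<String> fut = PlainActionFuture.newFuture();
        fut.onResponse("committed"); // the publisher normally completes this from another thread
        return FutureUtils.get(fut); // blocks indefinitely; failures are rethrown unchecked
    }
}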
} + }; + clusterStatePublisher.publish(clusterChangedEvent, fut, taskOutputs.createAckListener(threadPool, clusterChangedEvent.state())); + + // indefinitely wait for publication to complete + try { + FutureUtils.get(fut); + onPublicationSuccess(clusterChangedEvent, taskOutputs); + final long durationMillis = getTimeSince(startTimeNanos).millis(); + stateStats.stateUpdateTook(durationMillis); + stateStats.stateUpdated(); + clusterManagerMetrics.recordLatency(clusterManagerMetrics.clusterStatePublishHistogram, (double) durationMillis); + } catch (Exception e) { + stateStats.stateUpdateFailed(); + onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); + } + } + + void onPublicationSuccess(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs) { + final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); + taskOutputs.processedDifferentClusterState(clusterChangedEvent.previousState(), clusterChangedEvent.state()); + + try { + taskOutputs.clusterStatePublished(clusterChangedEvent); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage( + "exception thrown while notifying executor of new cluster state publication [{}]", + clusterChangedEvent.source() + ), + e + ); + } + final TimeValue executionTime = getTimeSince(notificationStartTime); + logExecutionTime( + executionTime, + "notify listeners on successful publication of cluster state (version: " + + clusterChangedEvent.state().version() + + ", uuid: " + + clusterChangedEvent.state().stateUUID() + + ')', + clusterChangedEvent.source() + ); + } + + void onPublicationFailed(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis, Exception exception) { + if (exception instanceof FailedToCommitClusterStateException) { + final long version = clusterChangedEvent.state().version(); + logger.warn( + () -> new ParameterizedMessage( + "failing [{}]: failed to commit cluster state version [{}]", + clusterChangedEvent.source(), + version + ), + exception + ); + taskOutputs.publishingFailed((FailedToCommitClusterStateException) exception); + } else { + handleException(clusterChangedEvent.source(), startTimeMillis, clusterChangedEvent.state(), exception); + } + } + + private void handleException(String summary, long startTimeMillis, ClusterState newClusterState, Exception e) { + final TimeValue executionTime = getTimeSince(startTimeMillis); + final long version = newClusterState.version(); + final String stateUUID = newClusterState.stateUUID(); + final String fullState = newClusterState.toString(); + logger.warn( + new ParameterizedMessage( + "took [{}] and then failed to publish updated cluster state (version: {}, uuid: {}) for [{}]:\n{}", + executionTime, + version, + stateUUID, + summary, + fullState + ), + e + ); + // TODO: do we want to call updateTask.onFailure here? 
+ } + + private TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { + ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, previousClusterState, taskSummary); + ClusterState newClusterState = patchVersions(previousClusterState, clusterTasksResult); + return new TaskOutputs( + taskInputs, + previousClusterState, + newClusterState, + getNonFailedTasks(taskInputs, clusterTasksResult), + clusterTasksResult.executionResults + ); + } + + private ClusterState patchVersions(ClusterState previousClusterState, ClusterTasksResult executionResult) { + ClusterState newClusterState = executionResult.resultingState; + + if (previousClusterState != newClusterState) { + // only the cluster-manager controls the version numbers + Builder builder = incrementVersion(newClusterState); + if (previousClusterState.routingTable() != newClusterState.routingTable()) { + builder.routingTable( + RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build() + ); + } + if (previousClusterState.metadata() != newClusterState.metadata()) { + builder.metadata(Metadata.builder(newClusterState.metadata()).version(newClusterState.metadata().version() + 1)); + } + + newClusterState = builder.build(); + } + + return newClusterState; + } + + public Builder incrementVersion(ClusterState clusterState) { + return ClusterState.builder(clusterState).incrementVersion(); + } + + /** + * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, + * ClusterStateTaskExecutor, ClusterStateTaskListener)}, submitted updates will not be batched. + * + * @param source the source of the cluster state update task + * @param updateTask the full context for the cluster state update + * task + */ + public & ClusterStateTaskListener> void submitStateUpdateTask( + String source, + T updateTask + ) { + submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask); + } + + /** + * Submits a cluster state update task; submitted updates will be + * batched across the same instance of executor. 
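// The unbatched overload above is the entry point for the common
// ClusterStateUpdateTask convenience class, which serves as its own config,
// executor, and listener. A minimal no-op sketch (the task logic is hypothetical):
import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;

final class NoopUpdateExample {
    // Submit with: clusterManagerService.submitStateUpdateTask("noop-demo", TASK);
    static final ClusterStateUpdateTask TASK = new ClusterStateUpdateTask() {
        @Override
        public ClusterState execute(ClusterState currentState) {
            return currentState; // returning the same instance skips publication entirely
        }

        @Override
        public void onFailure(String source, Exception e) {
            // handle timeout, throttling rejection, or publication failure
        }
    };
}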
The exact batching + * semantics depend on the underlying implementation but a rough + * guideline is that if the update task is submitted while there + * are pending update tasks for the same executor, these update + * tasks will all be executed on the executor in a single batch + * + * @param source the source of the cluster state update task + * @param task the state needed for the cluster state update task + * @param config the cluster state update task configuration + * @param executor the cluster state update task executor; tasks + * that share the same executor will be executed + * batches on this executor + * @param listener callback after the cluster state update task + * completes + * @param the type of the cluster state update task state + */ + public void submitStateUpdateTask( + String source, + T task, + ClusterStateTaskConfig config, + ClusterStateTaskExecutor executor, + ClusterStateTaskListener listener + ) { + submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor); + } + + /** + * Output created by executing a set of tasks provided as TaskInputs + */ + class TaskOutputs { + final TaskInputs taskInputs; + final ClusterState previousClusterState; + final ClusterState newClusterState; + final List nonFailedTasks; + final Map executionResults; + + TaskOutputs( + TaskInputs taskInputs, + ClusterState previousClusterState, + ClusterState newClusterState, + List nonFailedTasks, + Map executionResults + ) { + this.taskInputs = taskInputs; + this.previousClusterState = previousClusterState; + this.newClusterState = newClusterState; + this.nonFailedTasks = nonFailedTasks; + this.executionResults = executionResults; + } + + void publishingFailed(FailedToCommitClusterStateException t) { + nonFailedTasks.forEach(task -> task.listener.onFailure(task.source(), t)); + } + + void processedDifferentClusterState(ClusterState previousClusterState, ClusterState newClusterState) { + nonFailedTasks.forEach(task -> task.listener.clusterStateProcessed(task.source(), previousClusterState, newClusterState)); + } + + void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { + taskInputs.executor.clusterStatePublished(clusterChangedEvent); + } + + Discovery.AckListener createAckListener(ThreadPool threadPool, ClusterState newClusterState) { + return new DelegatingAckListener( + nonFailedTasks.stream() + .filter(task -> task.listener instanceof AckedClusterStateTaskListener) + .map( + task -> new AckCountDownListener( + (AckedClusterStateTaskListener) task.listener, + newClusterState.version(), + newClusterState.nodes(), + threadPool + ) + ) + .collect(Collectors.toList()) + ); + } + + boolean clusterStateUnchanged() { + return previousClusterState == newClusterState; + } + + void notifyFailedTasks() { + // fail all tasks that have failed + for (Batcher.UpdateTask updateTask : taskInputs.updateTasks) { + assert executionResults.containsKey(updateTask.task) : "missing " + updateTask; + final ClusterStateTaskExecutor.TaskResult taskResult = executionResults.get(updateTask.task); + if (taskResult.isSuccess() == false) { + updateTask.listener.onFailure(updateTask.source(), taskResult.getFailure()); + } + } + } + + void notifySuccessfulTasksOnUnchangedClusterState() { + nonFailedTasks.forEach(task -> { + if (task.listener instanceof AckedClusterStateTaskListener) { + // no need to wait for ack if nothing changed, the update can be counted as acknowledged + ((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null); + } + 
task.listener.clusterStateProcessed(task.source(), newClusterState, newClusterState); + }); + } + } + + /** + * Returns the tasks that are pending. + */ + public List pendingTasks() { + return Arrays.stream(threadPoolExecutor.getPending()).map(pending -> { + assert pending.task instanceof SourcePrioritizedRunnable + : "thread pool executor should only use SourcePrioritizedRunnable instances but found: " + + pending.task.getClass().getName(); + SourcePrioritizedRunnable task = (SourcePrioritizedRunnable) pending.task; + return new PendingClusterTask( + pending.insertionOrder, + pending.priority, + new Text(task.source()), + task.getAgeInMillis(), + pending.executing + ); + }).collect(Collectors.toList()); + } + + /** + * Returns the number of throttled pending tasks. + */ + public long numberOfThrottledPendingTasks() { + return throttlingStats.getTotalThrottledTaskCount(); + } + + /** + * Returns the stats of throttled pending tasks. + */ + public ClusterManagerThrottlingStats getThrottlingStats() { + return throttlingStats; } + + /** + * Returns the min version of nodes in cluster + */ + public Version getMinNodeVersion() { + return state().getNodes().getMinNodeVersion(); + } + + /** + * Returns the number of currently pending tasks. + */ + public int numberOfPendingTasks() { + return threadPoolExecutor.getNumberOfPendingTasks(); + } + + /** + * Returns the maximum wait time for tasks in the queue + * + * @return A zero time value if the queue is empty, otherwise the time value oldest task waiting in the queue + */ + public TimeValue getMaxTaskWaitTime() { + return threadPoolExecutor.getMaxTaskWaitTime(); + } + + private SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Supplier contextSupplier) { + if (listener instanceof AckedClusterStateTaskListener) { + return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, contextSupplier, logger); + } else { + return new SafeClusterStateTaskListener(listener, contextSupplier, logger); + } + } + + private static class SafeClusterStateTaskListener implements ClusterStateTaskListener { + private final ClusterStateTaskListener listener; + protected final Supplier context; + private final Logger logger; + + SafeClusterStateTaskListener(ClusterStateTaskListener listener, Supplier context, Logger logger) { + this.listener = listener; + this.context = context; + this.logger = logger; + } + + @Override + public void onFailure(String source, Exception e) { + try (ThreadContext.StoredContext ignore = context.get()) { + listener.onFailure(source, e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error(() -> new ParameterizedMessage("exception thrown by listener notifying of failure from [{}]", source), inner); + } + } + + @Override + public void onNoLongerClusterManager(String source) { + try (ThreadContext.StoredContext ignore = context.get()) { + listener.onNoLongerClusterManager(source); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage( + "exception thrown by listener while notifying no longer cluster-manager from [{}]", + source + ), + e + ); + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + try (ThreadContext.StoredContext ignore = context.get()) { + listener.clusterStateProcessed(source, oldState, newState); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage( + "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n" + 
+ "{}\nnew cluster state:\n{}", + source, + oldState, + newState + ), + e + ); + } + } + } + + private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener { + private final AckedClusterStateTaskListener listener; + private final Logger logger; + + SafeAckedClusterStateTaskListener( + AckedClusterStateTaskListener listener, + Supplier context, + Logger logger + ) { + super(listener, context, logger); + this.listener = listener; + this.logger = logger; + } + + @Override + public boolean mustAck(DiscoveryNode discoveryNode) { + return listener.mustAck(discoveryNode); + } + + @Override + public void onAllNodesAcked(@Nullable Exception e) { + try (ThreadContext.StoredContext ignore = context.get()) { + listener.onAllNodesAcked(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.error("exception thrown by listener while notifying on all nodes acked", inner); + } + } + + @Override + public void onAckTimeout() { + try (ThreadContext.StoredContext ignore = context.get()) { + listener.onAckTimeout(); + } catch (Exception e) { + logger.error("exception thrown by listener while notifying on ack timeout", e); + } + } + + @Override + public TimeValue ackTimeout() { + return listener.ackTimeout(); + } + } + + private void logExecutionTime(TimeValue executionTime, String activity, String summary) { + if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) { + logger.warn("took [{}], which is over [{}], to {} for [{}]", executionTime, slowTaskLoggingThreshold, activity, summary); + } else { + logger.debug("took [{}] to {} for [{}]", executionTime, activity, summary); + } + } + + private static class DelegatingAckListener implements Discovery.AckListener { + + private final List listeners; + + private DelegatingAckListener(List listeners) { + this.listeners = listeners; + } + + @Override + public void onCommit(TimeValue commitTime) { + for (Discovery.AckListener listener : listeners) { + listener.onCommit(commitTime); + } + } + + @Override + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { + for (Discovery.AckListener listener : listeners) { + listener.onNodeAck(node, e); + } + } + } + + private static class AckCountDownListener implements Discovery.AckListener { + + private static final Logger logger = LogManager.getLogger(AckCountDownListener.class); + + private final AckedClusterStateTaskListener ackedTaskListener; + private final CountDown countDown; + private final DiscoveryNode clusterManagerNode; + private final ThreadPool threadPool; + private final long clusterStateVersion; + private volatile Scheduler.Cancellable ackTimeoutCallback; + private Exception lastFailure; + + AckCountDownListener( + AckedClusterStateTaskListener ackedTaskListener, + long clusterStateVersion, + DiscoveryNodes nodes, + ThreadPool threadPool + ) { + this.ackedTaskListener = ackedTaskListener; + this.clusterStateVersion = clusterStateVersion; + this.threadPool = threadPool; + this.clusterManagerNode = nodes.getClusterManagerNode(); + int countDown = 0; + for (DiscoveryNode node : nodes) { + // we always wait for at least the cluster-manager node + if (node.equals(clusterManagerNode) || ackedTaskListener.mustAck(node)) { + countDown++; + } + } + logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); + this.countDown = new CountDown(countDown + 1); // we also wait for onCommit to be called + } + + @Override + public void onCommit(TimeValue 
commitTime) { + TimeValue ackTimeout = ackedTaskListener.ackTimeout(); + if (ackTimeout == null) { + ackTimeout = TimeValue.ZERO; + } + final TimeValue timeLeft = TimeValue.timeValueNanos(Math.max(0, ackTimeout.nanos() - commitTime.nanos())); + if (timeLeft.nanos() == 0L) { + onTimeout(); + } else if (countDown.countDown()) { + finish(); + } else { + this.ackTimeoutCallback = threadPool.schedule(this::onTimeout, timeLeft, ThreadPool.Names.GENERIC); + // re-check if onNodeAck has not completed while we were scheduling the timeout + if (countDown.isCountedDown()) { + ackTimeoutCallback.cancel(); + } + } + } + + @Override + public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { + if (node.equals(clusterManagerNode) == false && ackedTaskListener.mustAck(node) == false) { + return; + } + if (e == null) { + logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); + } else { + this.lastFailure = e; + logger.debug( + () -> new ParameterizedMessage( + "ack received from node [{}], cluster_state update (version: {})", + node, + clusterStateVersion + ), + e + ); + } + + if (countDown.countDown()) { + finish(); + } + } + + private void finish() { + logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion); + if (ackTimeoutCallback != null) { + ackTimeoutCallback.cancel(); + } + ackedTaskListener.onAllNodesAcked(lastFailure); + } + + public void onTimeout() { + if (countDown.fastForward()) { + logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion); + ackedTaskListener.onAckTimeout(); + } + } + } + + private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { + ClusterTasksResult clusterTasksResult; + try { + List inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); + clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs); + if (previousClusterState != clusterTasksResult.resultingState + && previousClusterState.nodes().isLocalNodeElectedClusterManager() + && (clusterTasksResult.resultingState.nodes().isLocalNodeElectedClusterManager() == false)) { + throw new AssertionError("update task submitted to ClusterManagerService cannot remove cluster-manager"); + } + } catch (Exception e) { + logger.trace( + () -> new ParameterizedMessage( + "failed to execute cluster state update (on version: [{}], uuid: [{}]) for [{}]\n{}{}{}", + previousClusterState.version(), + previousClusterState.stateUUID(), + taskSummary, + previousClusterState.nodes(), + previousClusterState.routingTable(), + previousClusterState.getRoutingNodes() + ), // may be expensive => construct message lazily + e + ); + clusterTasksResult = ClusterTasksResult.builder() + .failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e) + .build(previousClusterState); + } + + assert clusterTasksResult.executionResults != null; + assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size() : String.format( + Locale.ROOT, + "expected [%d] task result%s but was [%d]", + taskInputs.updateTasks.size(), + taskInputs.updateTasks.size() == 1 ? 
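// executeTasks here is what a shared executor plugs into: submissions that
// reuse one executor instance may be folded into a single batch, per the
// submitStateUpdateTask javadoc above. A sketch with an illustrative task type
// (assumes ClusterStateTaskExecutor remains lambda-friendly, i.e. has a single
// abstract method):
import org.opensearch.cluster.ClusterStateTaskExecutor;

final class BatchedExecutorSketch {
    static final class RefreshTask {
        final String index;
        RefreshTask(String index) {
            this.index = index;
        }
    }

    static final ClusterStateTaskExecutor<RefreshTask> EXECUTOR = (currentState, tasks) ->
        ClusterStateTaskExecutor.ClusterTasksResult.<RefreshTask>builder()
            .successes(tasks)     // mark every batched task successful
            .build(currentState); // unchanged state: listeners are acked immediately
}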
"" : "s", + clusterTasksResult.executionResults.size() + ); + if (Assertions.ENABLED) { + ClusterTasksResult finalClusterTasksResult = clusterTasksResult; + taskInputs.updateTasks.forEach(updateTask -> { + assert finalClusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; + }); + } + + return clusterTasksResult; + } + + private List getNonFailedTasks(TaskInputs taskInputs, ClusterTasksResult clusterTasksResult) { + return taskInputs.updateTasks.stream().filter(updateTask -> { + assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; + final ClusterStateTaskExecutor.TaskResult taskResult = clusterTasksResult.executionResults.get(updateTask.task); + return taskResult.isSuccess(); + }).collect(Collectors.toList()); + } + + /** + * Represents a set of tasks to be processed together with their executor + */ + private class TaskInputs { + + final List updateTasks; + final ClusterStateTaskExecutor executor; + final Function taskSummaryGenerator; + + TaskInputs( + ClusterStateTaskExecutor executor, + List updateTasks, + final Function taskSummaryGenerator + ) { + this.executor = executor; + this.updateTasks = updateTasks; + this.taskSummaryGenerator = taskSummaryGenerator; + } + + boolean runOnlyWhenClusterManager() { + return executor.runOnlyOnClusterManager(); + } + + void onNoLongerClusterManager() { + updateTasks.forEach(task -> task.listener.onNoLongerClusterManager(task.source())); + } + } + + /** + * Functionality for register task key to cluster manager node. + * + * @param taskKey - task key of task + * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not + * @return throttling task key which needs to be passed while submitting task to cluster manager + */ + public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) { + return clusterManagerTaskThrottler.registerClusterManagerTask(taskKey, throttlingEnabled); + } + + /** + * Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together, + * potentially with more tasks of the same executor. 
+ * + * @param source the source of the cluster state update task + * @param tasks a map of update tasks and their corresponding listeners + * @param config the cluster state update task configuration + * @param executor the cluster state update task executor; tasks + * that share the same executor will be executed + * batches on this executor + * @param the type of the cluster state update task state + */ + public void submitStateUpdateTasks( + final String source, + final Map tasks, + final ClusterStateTaskConfig config, + final ClusterStateTaskExecutor executor + ) { + if (!lifecycle.started()) { + return; + } + final ThreadContext threadContext = threadPool.getThreadContext(); + final Supplier supplier = threadContext.newRestorableContext(true); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); + + List safeTasks = tasks.entrySet() + .stream() + .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e.getKey(), safe(e.getValue(), supplier), executor)) + .collect(Collectors.toList()); + taskBatcher.submitTasks(safeTasks, config.timeout()); + } catch (OpenSearchRejectedExecutionException e) { + // ignore cases where we are shutting down..., there is really nothing interesting + // to be done here... + if (!lifecycle.stoppedOrClosed()) { + throw e; + } + } + } + + public ClusterStateStats getClusterStateStats() { + return stateStats; + } + } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java index b4f2250f6dec9..ef27dde622b83 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterService.java @@ -41,7 +41,6 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.LocalNodeClusterManagerListener; -import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.OperationRouting; @@ -247,25 +246,10 @@ public void addLocalNodeClusterManagerListener(LocalNodeClusterManagerListener l clusterApplierService.addLocalNodeClusterManagerListener(listener); } - /** - * Add a listener for on/off local node cluster-manager events - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #addLocalNodeClusterManagerListener} - */ - @Deprecated - public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { - addLocalNodeClusterManagerListener(listener); - } - public ClusterManagerService getClusterManagerService() { return clusterManagerService; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerService()} */ - @Deprecated - public MasterService getMasterService() { - return clusterManagerService; - } - /** * Getter and Setter for IndexingPressureService, This method exposes IndexingPressureService stats to other plugins for usage. 
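// submitStateUpdateTasks above uses the stash-and-restore thread-context idiom:
// capture a restorer for the caller's context, stash to a clean system context
// for the batching machinery, then run listeners under the restored caller
// context. A condensed sketch of that idiom (helper name hypothetical):
import java.util.function.Supplier;

import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.threadpool.ThreadPool;

final class StashRestoreSketch {
    static void runAsSystem(ThreadPool threadPool, Runnable submission, Runnable listenerCallback) {
        ThreadContext threadContext = threadPool.getThreadContext();
        Supplier<ThreadContext.StoredContext> restorer = threadContext.newRestorableContext(true);
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            submission.run(); // runs with a clean context (marked as system in the real code)
        }
        try (ThreadContext.StoredContext ignore = restorer.get()) {
            listenerCallback.run(); // runs under the restored caller context
        }
    }
}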
     * Although Indexing Pressure instances can be accessed via Node and NodeService class but none of them are
@@ -291,12 +275,6 @@ public static boolean assertClusterOrClusterManagerStateThread() {
         return true;
     }

-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterOrClusterManagerStateThread} */
-    @Deprecated
-    public static boolean assertClusterOrMasterStateThread() {
-        return assertClusterOrClusterManagerStateThread();
-    }
-
     public ClusterName getClusterName() {
         return clusterName;
     }
diff --git a/server/src/main/java/org/opensearch/cluster/service/MasterService.java b/server/src/main/java/org/opensearch/cluster/service/MasterService.java
deleted file mode 100644
index 455e7301a490d..0000000000000
--- a/server/src/main/java/org/opensearch/cluster/service/MasterService.java
+++ /dev/null
@@ -1,1050 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-/*
- * Modifications Copyright OpenSearch Contributors. See
- * GitHub history for details.
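// The ClusterService.java hunks above also drop the deprecated listener and
// accessor aliases. A hypothetical plugin registration after this patch
// (method names per the renamed LocalNodeClusterManagerListener interface):
import org.opensearch.cluster.LocalNodeClusterManagerListener;
import org.opensearch.cluster.service.ClusterService;

final class ElectionListenerExample {
    // Replaces the removed clusterService.addLocalNodeMasterListener(...) call.
    static void register(ClusterService clusterService) {
        clusterService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() {
            @Override
            public void onClusterManager() {
                // the local node was elected cluster-manager
            }

            @Override
            public void offClusterManager() {
                // the local node stepped down
            }
        });
    }
}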
- */ - -package org.opensearch.cluster.service; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.Version; -import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.cluster.AckedClusterStateTaskListener; -import org.opensearch.cluster.ClusterChangedEvent; -import org.opensearch.cluster.ClusterManagerMetrics; -import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.ClusterState.Builder; -import org.opensearch.cluster.ClusterStateTaskConfig; -import org.opensearch.cluster.ClusterStateTaskExecutor; -import org.opensearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; -import org.opensearch.cluster.ClusterStateTaskListener; -import org.opensearch.cluster.coordination.ClusterStatePublisher; -import org.opensearch.cluster.coordination.FailedToCommitClusterStateException; -import org.opensearch.cluster.metadata.Metadata; -import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodes; -import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.common.Nullable; -import org.opensearch.common.Priority; -import org.opensearch.common.annotation.DeprecatedApi; -import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.CountDown; -import org.opensearch.common.util.concurrent.FutureUtils; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.util.concurrent.ThreadContextAccess; -import org.opensearch.core.Assertions; -import org.opensearch.core.common.text.Text; -import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.discovery.Discovery; -import org.opensearch.node.Node; -import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; -import org.opensearch.telemetry.metrics.tags.Tags; -import org.opensearch.threadpool.Scheduler; -import org.opensearch.threadpool.ThreadPool; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.TimeUnit; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; - -/** - * Main Master Node Service - * - * @opensearch.api - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerService}. 
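// The deleted code below wired the legacy cluster.service.slow_master_task_logging_threshold
// key in as a deprecated fallback; after this patch only the cluster-manager key
// (defined in ClusterManagerService above) exists. A node-settings sketch under
// that assumption:
import org.opensearch.common.settings.Settings;

final class SlowTaskThresholdExample {
    static Settings slowTaskLoggingThreshold() {
        return Settings.builder()
            .put("cluster.service.slow_cluster_manager_task_logging_threshold", "15s") // the surviving key
            .build();
    }
}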
- */ -@Deprecated -@DeprecatedApi(since = "2.2.0") -public class MasterService extends AbstractLifecycleComponent { - private static final Logger logger = LogManager.getLogger(MasterService.class); - - public static final Setting MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( - "cluster.service.slow_master_task_logging_threshold", - TimeValue.timeValueSeconds(10), - Setting.Property.Dynamic, - Setting.Property.NodeScope, - Setting.Property.Deprecated - ); - // The setting below is going to replace the above. - // To keep backwards compatibility, the old usage is retained, and it's also used as the fallback for the new usage. - public static final Setting CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting( - "cluster.service.slow_cluster_manager_task_logging_threshold", - MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - Setting.Property.Dynamic, - Setting.Property.NodeScope - ); - - static final String CLUSTER_MANAGER_UPDATE_THREAD_NAME = "clusterManagerService#updateTask"; - - /** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #CLUSTER_MANAGER_UPDATE_THREAD_NAME} - */ - @Deprecated - static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; - - ClusterStatePublisher clusterStatePublisher; - - private final String nodeName; - - private java.util.function.Supplier clusterStateSupplier; - - private volatile TimeValue slowTaskLoggingThreshold; - - protected final ThreadPool threadPool; - - private volatile PrioritizedOpenSearchThreadPoolExecutor threadPoolExecutor; - private volatile Batcher taskBatcher; - protected final ClusterManagerTaskThrottler clusterManagerTaskThrottler; - private final ClusterManagerThrottlingStats throttlingStats; - private final ClusterStateStats stateStats; - private final ClusterManagerMetrics clusterManagerMetrics; - - public MasterService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool) { - this(settings, clusterSettings, threadPool, new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)); - } - - public MasterService( - Settings settings, - ClusterSettings clusterSettings, - ThreadPool threadPool, - ClusterManagerMetrics clusterManagerMetrics - ) { - this.nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); - - this.slowTaskLoggingThreshold = CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings); - clusterSettings.addSettingsUpdateConsumer( - CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - this::setSlowTaskLoggingThreshold - ); - - this.throttlingStats = new ClusterManagerThrottlingStats(); - this.clusterManagerTaskThrottler = new ClusterManagerTaskThrottler( - settings, - clusterSettings, - this::getMinNodeVersion, - throttlingStats - ); - this.stateStats = new ClusterStateStats(); - this.threadPool = threadPool; - this.clusterManagerMetrics = clusterManagerMetrics; - } - - private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) { - this.slowTaskLoggingThreshold = slowTaskLoggingThreshold; - } - - public synchronized void setClusterStatePublisher(ClusterStatePublisher publisher) { - clusterStatePublisher = publisher; - } - - public synchronized void setClusterStateSupplier(java.util.function.Supplier clusterStateSupplier) { - this.clusterStateSupplier = clusterStateSupplier; - } - - @Override - protected synchronized void doStart() { - Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before
starting"); - Objects.requireNonNull(clusterStateSupplier, "please set a cluster state supplier before starting"); - threadPoolExecutor = createThreadPoolExecutor(); - taskBatcher = new Batcher(logger, threadPoolExecutor, clusterManagerTaskThrottler); - } - - protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { - return OpenSearchExecutors.newSinglePrioritizing( - nodeName + "/" + CLUSTER_MANAGER_UPDATE_THREAD_NAME, - daemonThreadFactory(nodeName, CLUSTER_MANAGER_UPDATE_THREAD_NAME), - threadPool.getThreadContext(), - threadPool.scheduler() - ); - } - - @SuppressWarnings("unchecked") - class Batcher extends TaskBatcher { - - Batcher(Logger logger, PrioritizedOpenSearchThreadPoolExecutor threadExecutor, TaskBatcherListener taskBatcherListener) { - super(logger, threadExecutor, taskBatcherListener); - } - - @Override - protected void onTimeout(List tasks, TimeValue timeout) { - threadPool.generic() - .execute( - () -> tasks.forEach( - task -> ((UpdateTask) task).listener.onFailure( - task.source, - new ProcessClusterEventTimeoutException(timeout, task.source) - ) - ) - ); - } - - @Override - protected void run(Object batchingKey, List tasks, Function taskSummaryGenerator) { - ClusterStateTaskExecutor taskExecutor = (ClusterStateTaskExecutor) batchingKey; - List updateTasks = (List) tasks; - runTasks(new TaskInputs(taskExecutor, updateTasks, taskSummaryGenerator)); - } - - class UpdateTask extends BatchedTask { - final ClusterStateTaskListener listener; - - UpdateTask( - Priority priority, - String source, - Object task, - ClusterStateTaskListener listener, - ClusterStateTaskExecutor executor - ) { - super(priority, source, executor, task); - this.listener = listener; - } - - @Override - public String describeTasks(List tasks) { - return ((ClusterStateTaskExecutor) batchingKey).describeTasks( - tasks.stream().map(BatchedTask::getTask).collect(Collectors.toList()) - ); - } - } - } - - @Override - protected synchronized void doStop() { - ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS); - } - - @Override - protected synchronized void doClose() {} - - /** - * The current cluster state exposed by the discovery layer. Package-visible for tests. - */ - ClusterState state() { - return clusterStateSupplier.get(); - } - - private static boolean isClusterManagerUpdateThread() { - return Thread.currentThread().getName().contains(CLUSTER_MANAGER_UPDATE_THREAD_NAME) - || Thread.currentThread().getName().contains(MASTER_UPDATE_THREAD_NAME); - } - - public static boolean assertClusterManagerUpdateThread() { - assert isClusterManagerUpdateThread() : "not called from the cluster-manager service thread"; - return true; - } - - public static boolean assertNotClusterManagerUpdateThread(String reason) { - assert isClusterManagerUpdateThread() == false : "Expected current thread [" - + Thread.currentThread() - + "] to not be the cluster-manager service thread. 
Reason: [" - + reason - + "]"; - return true; - } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterManagerUpdateThread()} */ - @Deprecated - public static boolean assertMasterUpdateThread() { - return assertClusterManagerUpdateThread(); - } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertNotClusterManagerUpdateThread(String)} */ - @Deprecated - public static boolean assertNotMasterUpdateThread(String reason) { - return assertNotClusterManagerUpdateThread(reason); - } - - private void runTasks(TaskInputs taskInputs) { - final String summary; - if (logger.isTraceEnabled()) { - summary = taskInputs.taskSummaryGenerator.apply(true); - } else { - summary = taskInputs.taskSummaryGenerator.apply(false); - } - - if (!lifecycle.started()) { - logger.debug("processing [{}]: ignoring, cluster-manager service not started", summary); - return; - } - - if (logger.isTraceEnabled()) { - logger.trace("executing cluster state update for [{}]", summary); - } else { - logger.debug("executing cluster state update for [{}]", summary); - } - - final ClusterState previousClusterState = state(); - - if (!previousClusterState.nodes().isLocalNodeElectedClusterManager() && taskInputs.runOnlyWhenClusterManager()) { - logger.debug("failing [{}]: local node is no longer cluster-manager", summary); - taskInputs.onNoLongerClusterManager(); - return; - } - - final long computationStartTime = threadPool.preciseRelativeTimeInNanos(); - final TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState, summary); - taskOutputs.notifyFailedTasks(); - final TimeValue computationTime = getTimeSince(computationStartTime); - logExecutionTime(computationTime, "compute cluster state update", summary); - - clusterManagerMetrics.recordLatency( - clusterManagerMetrics.clusterStateComputeHistogram, - (double) computationTime.getMillis(), - Optional.of(Tags.create().addTag("Operation", taskInputs.executor.getClass().getSimpleName())) - ); - - if (taskOutputs.clusterStateUnchanged()) { - final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); - taskOutputs.notifySuccessfulTasksOnUnchangedClusterState(); - final TimeValue executionTime = getTimeSince(notificationStartTime); - logExecutionTime(executionTime, "notify listeners on unchanged cluster state", summary); - } else { - final ClusterState newClusterState = taskOutputs.newClusterState; - if (logger.isTraceEnabled()) { - logger.trace("cluster state updated, source [{}]\n{}", summary, newClusterState); - } else { - logger.debug("cluster state updated, version [{}], source [{}]", newClusterState.version(), summary); - } - final long publicationStartTime = threadPool.preciseRelativeTimeInNanos(); - try { - ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(summary, newClusterState, previousClusterState); - // new cluster state, notify all listeners - final DiscoveryNodes.Delta nodesDelta = clusterChangedEvent.nodesDelta(); - if (nodesDelta.hasChanges() && logger.isInfoEnabled()) { - String nodesDeltaSummary = nodesDelta.shortSummary(); - if (nodesDeltaSummary.length() > 0) { - logger.info( - "{}, term: {}, version: {}, delta: {}", - summary, - newClusterState.term(), - newClusterState.version(), - nodesDeltaSummary - ); - } - } - - logger.debug("publishing cluster state version [{}]", newClusterState.version()); - publish(clusterChangedEvent, taskOutputs, publicationStartTime); - } catch (Exception e) { - handleException(summary, 
publicationStartTime, newClusterState, e); - } - } - } - - private TimeValue getTimeSince(long startTimeNanos) { - return TimeValue.timeValueMillis(TimeValue.nsecToMSec(threadPool.preciseRelativeTimeInNanos() - startTimeNanos)); - } - - protected void publish(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeNanos) { - final PlainActionFuture fut = new PlainActionFuture() { - @Override - protected boolean blockingAllowed() { - return isClusterManagerUpdateThread() || super.blockingAllowed(); - } - }; - clusterStatePublisher.publish(clusterChangedEvent, fut, taskOutputs.createAckListener(threadPool, clusterChangedEvent.state())); - - // indefinitely wait for publication to complete - try { - FutureUtils.get(fut); - onPublicationSuccess(clusterChangedEvent, taskOutputs); - final long durationMillis = getTimeSince(startTimeNanos).millis(); - stateStats.stateUpdateTook(durationMillis); - stateStats.stateUpdated(); - clusterManagerMetrics.recordLatency(clusterManagerMetrics.clusterStatePublishHistogram, (double) durationMillis); - } catch (Exception e) { - stateStats.stateUpdateFailed(); - onPublicationFailed(clusterChangedEvent, taskOutputs, startTimeNanos, e); - } - } - - void onPublicationSuccess(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs) { - final long notificationStartTime = threadPool.preciseRelativeTimeInNanos(); - taskOutputs.processedDifferentClusterState(clusterChangedEvent.previousState(), clusterChangedEvent.state()); - - try { - taskOutputs.clusterStatePublished(clusterChangedEvent); - } catch (Exception e) { - logger.error( - () -> new ParameterizedMessage( - "exception thrown while notifying executor of new cluster state publication [{}]", - clusterChangedEvent.source() - ), - e - ); - } - final TimeValue executionTime = getTimeSince(notificationStartTime); - logExecutionTime( - executionTime, - "notify listeners on successful publication of cluster state (version: " - + clusterChangedEvent.state().version() - + ", uuid: " - + clusterChangedEvent.state().stateUUID() - + ')', - clusterChangedEvent.source() - ); - } - - void onPublicationFailed(ClusterChangedEvent clusterChangedEvent, TaskOutputs taskOutputs, long startTimeMillis, Exception exception) { - if (exception instanceof FailedToCommitClusterStateException) { - final long version = clusterChangedEvent.state().version(); - logger.warn( - () -> new ParameterizedMessage( - "failing [{}]: failed to commit cluster state version [{}]", - clusterChangedEvent.source(), - version - ), - exception - ); - taskOutputs.publishingFailed((FailedToCommitClusterStateException) exception); - } else { - handleException(clusterChangedEvent.source(), startTimeMillis, clusterChangedEvent.state(), exception); - } - } - - private void handleException(String summary, long startTimeMillis, ClusterState newClusterState, Exception e) { - final TimeValue executionTime = getTimeSince(startTimeMillis); - final long version = newClusterState.version(); - final String stateUUID = newClusterState.stateUUID(); - final String fullState = newClusterState.toString(); - logger.warn( - new ParameterizedMessage( - "took [{}] and then failed to publish updated cluster state (version: {}, uuid: {}) for [{}]:\n{}", - executionTime, - version, - stateUUID, - summary, - fullState - ), - e - ); - // TODO: do we want to call updateTask.onFailure here? 
- } - - private TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { - ClusterTasksResult clusterTasksResult = executeTasks(taskInputs, previousClusterState, taskSummary); - ClusterState newClusterState = patchVersions(previousClusterState, clusterTasksResult); - return new TaskOutputs( - taskInputs, - previousClusterState, - newClusterState, - getNonFailedTasks(taskInputs, clusterTasksResult), - clusterTasksResult.executionResults - ); - } - - private ClusterState patchVersions(ClusterState previousClusterState, ClusterTasksResult executionResult) { - ClusterState newClusterState = executionResult.resultingState; - - if (previousClusterState != newClusterState) { - // only the cluster-manager controls the version numbers - Builder builder = incrementVersion(newClusterState); - if (previousClusterState.routingTable() != newClusterState.routingTable()) { - builder.routingTable( - RoutingTable.builder(newClusterState.routingTable()).version(newClusterState.routingTable().version() + 1).build() - ); - } - if (previousClusterState.metadata() != newClusterState.metadata()) { - builder.metadata(Metadata.builder(newClusterState.metadata()).version(newClusterState.metadata().version() + 1)); - } - - newClusterState = builder.build(); - } - - return newClusterState; - } - - public Builder incrementVersion(ClusterState clusterState) { - return ClusterState.builder(clusterState).incrementVersion(); - } - - /** - * Submits a cluster state update task; unlike {@link #submitStateUpdateTask(String, Object, ClusterStateTaskConfig, - * ClusterStateTaskExecutor, ClusterStateTaskListener)}, submitted updates will not be batched. - * - * @param source the source of the cluster state update task - * @param updateTask the full context for the cluster state update - * task - */ - public & ClusterStateTaskListener> void submitStateUpdateTask( - String source, - T updateTask - ) { - submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask); - } - - /** - * Submits a cluster state update task; submitted updates will be - * batched across the same instance of executor. 
The exact batching - * semantics depend on the underlying implementation but a rough - * guideline is that if the update task is submitted while there - * are pending update tasks for the same executor, these update - * tasks will all be executed on the executor in a single batch - * - * @param source the source of the cluster state update task - * @param task the state needed for the cluster state update task - * @param config the cluster state update task configuration - * @param executor the cluster state update task executor; tasks - * that share the same executor will be executed - * in batches on this executor - * @param listener callback after the cluster state update task - * completes - * @param the type of the cluster state update task state - */ - public void submitStateUpdateTask( - String source, - T task, - ClusterStateTaskConfig config, - ClusterStateTaskExecutor executor, - ClusterStateTaskListener listener - ) { - submitStateUpdateTasks(source, Collections.singletonMap(task, listener), config, executor); - } - - /** - * Output created by executing a set of tasks provided as TaskInputs - */ - class TaskOutputs { - final TaskInputs taskInputs; - final ClusterState previousClusterState; - final ClusterState newClusterState; - final List nonFailedTasks; - final Map executionResults; - - TaskOutputs( - TaskInputs taskInputs, - ClusterState previousClusterState, - ClusterState newClusterState, - List nonFailedTasks, - Map executionResults - ) { - this.taskInputs = taskInputs; - this.previousClusterState = previousClusterState; - this.newClusterState = newClusterState; - this.nonFailedTasks = nonFailedTasks; - this.executionResults = executionResults; - } - - void publishingFailed(FailedToCommitClusterStateException t) { - nonFailedTasks.forEach(task -> task.listener.onFailure(task.source(), t)); - } - - void processedDifferentClusterState(ClusterState previousClusterState, ClusterState newClusterState) { - nonFailedTasks.forEach(task -> task.listener.clusterStateProcessed(task.source(), previousClusterState, newClusterState)); - } - - void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { - taskInputs.executor.clusterStatePublished(clusterChangedEvent); - } - - Discovery.AckListener createAckListener(ThreadPool threadPool, ClusterState newClusterState) { - return new DelegatingAckListener( - nonFailedTasks.stream() - .filter(task -> task.listener instanceof AckedClusterStateTaskListener) - .map( - task -> new AckCountDownListener( - (AckedClusterStateTaskListener) task.listener, - newClusterState.version(), - newClusterState.nodes(), - threadPool - ) - ) - .collect(Collectors.toList()) - ); - } - - boolean clusterStateUnchanged() { - return previousClusterState == newClusterState; - } - - void notifyFailedTasks() { - // fail all tasks that have failed - for (Batcher.UpdateTask updateTask : taskInputs.updateTasks) { - assert executionResults.containsKey(updateTask.task) : "missing " + updateTask; - final ClusterStateTaskExecutor.TaskResult taskResult = executionResults.get(updateTask.task); - if (taskResult.isSuccess() == false) { - updateTask.listener.onFailure(updateTask.source(), taskResult.getFailure()); - } - } - } - - void notifySuccessfulTasksOnUnchangedClusterState() { - nonFailedTasks.forEach(task -> { - if (task.listener instanceof AckedClusterStateTaskListener) { - // no need to wait for ack if nothing changed, the update can be counted as acknowledged - ((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null); - } -
task.listener.clusterStateProcessed(task.source(), newClusterState, newClusterState); - }); - } - } - - /** - * Returns the tasks that are pending. - */ - public List pendingTasks() { - return Arrays.stream(threadPoolExecutor.getPending()).map(pending -> { - assert pending.task instanceof SourcePrioritizedRunnable - : "thread pool executor should only use SourcePrioritizedRunnable instances but found: " - + pending.task.getClass().getName(); - SourcePrioritizedRunnable task = (SourcePrioritizedRunnable) pending.task; - return new PendingClusterTask( - pending.insertionOrder, - pending.priority, - new Text(task.source()), - task.getAgeInMillis(), - pending.executing - ); - }).collect(Collectors.toList()); - } - - /** - * Returns the number of throttled pending tasks. - */ - public long numberOfThrottledPendingTasks() { - return throttlingStats.getTotalThrottledTaskCount(); - } - - /** - * Returns the stats of throttled pending tasks. - */ - public ClusterManagerThrottlingStats getThrottlingStats() { - return throttlingStats; - } - - /** - * Returns the minimum version of the nodes in the cluster - */ - public Version getMinNodeVersion() { - return state().getNodes().getMinNodeVersion(); - } - - /** - * Returns the number of currently pending tasks. - */ - public int numberOfPendingTasks() { - return threadPoolExecutor.getNumberOfPendingTasks(); - } - - /** - * Returns the maximum wait time for tasks in the queue - * - * @return A zero time value if the queue is empty, otherwise the wait time of the oldest task in the queue - */ - public TimeValue getMaxTaskWaitTime() { - return threadPoolExecutor.getMaxTaskWaitTime(); - } - - private SafeClusterStateTaskListener safe(ClusterStateTaskListener listener, Supplier contextSupplier) { - if (listener instanceof AckedClusterStateTaskListener) { - return new SafeAckedClusterStateTaskListener((AckedClusterStateTaskListener) listener, contextSupplier, logger); - } else { - return new SafeClusterStateTaskListener(listener, contextSupplier, logger); - } - } - - private static class SafeClusterStateTaskListener implements ClusterStateTaskListener { - private final ClusterStateTaskListener listener; - protected final Supplier context; - private final Logger logger; - - SafeClusterStateTaskListener(ClusterStateTaskListener listener, Supplier context, Logger logger) { - this.listener = listener; - this.context = context; - this.logger = logger; - } - - @Override - public void onFailure(String source, Exception e) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onFailure(source, e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.error(() -> new ParameterizedMessage("exception thrown by listener notifying of failure from [{}]", source), inner); - } - } - - @Override - public void onNoLongerClusterManager(String source) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onNoLongerClusterManager(source); - } catch (Exception e) { - logger.error( - () -> new ParameterizedMessage( - "exception thrown by listener while notifying no longer cluster-manager from [{}]", - source - ), - e - ); - } - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.clusterStateProcessed(source, oldState, newState); - } catch (Exception e) { - logger.error( - () -> new ParameterizedMessage( - "exception thrown by listener while notifying of cluster state processed from [{}], old cluster state:\n"
- + "{}\nnew cluster state:\n{}", - source, - oldState, - newState - ), - e - ); - } - } - } - - private static class SafeAckedClusterStateTaskListener extends SafeClusterStateTaskListener implements AckedClusterStateTaskListener { - private final AckedClusterStateTaskListener listener; - private final Logger logger; - - SafeAckedClusterStateTaskListener( - AckedClusterStateTaskListener listener, - Supplier context, - Logger logger - ) { - super(listener, context, logger); - this.listener = listener; - this.logger = logger; - } - - @Override - public boolean mustAck(DiscoveryNode discoveryNode) { - return listener.mustAck(discoveryNode); - } - - @Override - public void onAllNodesAcked(@Nullable Exception e) { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onAllNodesAcked(e); - } catch (Exception inner) { - inner.addSuppressed(e); - logger.error("exception thrown by listener while notifying on all nodes acked", inner); - } - } - - @Override - public void onAckTimeout() { - try (ThreadContext.StoredContext ignore = context.get()) { - listener.onAckTimeout(); - } catch (Exception e) { - logger.error("exception thrown by listener while notifying on ack timeout", e); - } - } - - @Override - public TimeValue ackTimeout() { - return listener.ackTimeout(); - } - } - - private void logExecutionTime(TimeValue executionTime, String activity, String summary) { - if (executionTime.getMillis() > slowTaskLoggingThreshold.getMillis()) { - logger.warn("took [{}], which is over [{}], to {} for [{}]", executionTime, slowTaskLoggingThreshold, activity, summary); - } else { - logger.debug("took [{}] to {} for [{}]", executionTime, activity, summary); - } - } - - private static class DelegatingAckListener implements Discovery.AckListener { - - private final List listeners; - - private DelegatingAckListener(List listeners) { - this.listeners = listeners; - } - - @Override - public void onCommit(TimeValue commitTime) { - for (Discovery.AckListener listener : listeners) { - listener.onCommit(commitTime); - } - } - - @Override - public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - for (Discovery.AckListener listener : listeners) { - listener.onNodeAck(node, e); - } - } - } - - private static class AckCountDownListener implements Discovery.AckListener { - - private static final Logger logger = LogManager.getLogger(AckCountDownListener.class); - - private final AckedClusterStateTaskListener ackedTaskListener; - private final CountDown countDown; - private final DiscoveryNode clusterManagerNode; - private final ThreadPool threadPool; - private final long clusterStateVersion; - private volatile Scheduler.Cancellable ackTimeoutCallback; - private Exception lastFailure; - - AckCountDownListener( - AckedClusterStateTaskListener ackedTaskListener, - long clusterStateVersion, - DiscoveryNodes nodes, - ThreadPool threadPool - ) { - this.ackedTaskListener = ackedTaskListener; - this.clusterStateVersion = clusterStateVersion; - this.threadPool = threadPool; - this.clusterManagerNode = nodes.getClusterManagerNode(); - int countDown = 0; - for (DiscoveryNode node : nodes) { - // we always wait for at least the cluster-manager node - if (node.equals(clusterManagerNode) || ackedTaskListener.mustAck(node)) { - countDown++; - } - } - logger.trace("expecting {} acknowledgements for cluster_state update (version: {})", countDown, clusterStateVersion); - this.countDown = new CountDown(countDown + 1); // we also wait for onCommit to be called - } - - @Override - public void onCommit(TimeValue 
commitTime) { - TimeValue ackTimeout = ackedTaskListener.ackTimeout(); - if (ackTimeout == null) { - ackTimeout = TimeValue.ZERO; - } - final TimeValue timeLeft = TimeValue.timeValueNanos(Math.max(0, ackTimeout.nanos() - commitTime.nanos())); - if (timeLeft.nanos() == 0L) { - onTimeout(); - } else if (countDown.countDown()) { - finish(); - } else { - this.ackTimeoutCallback = threadPool.schedule(this::onTimeout, timeLeft, ThreadPool.Names.GENERIC); - // re-check if onNodeAck has not completed while we were scheduling the timeout - if (countDown.isCountedDown()) { - ackTimeoutCallback.cancel(); - } - } - } - - @Override - public void onNodeAck(DiscoveryNode node, @Nullable Exception e) { - if (node.equals(clusterManagerNode) == false && ackedTaskListener.mustAck(node) == false) { - return; - } - if (e == null) { - logger.trace("ack received from node [{}], cluster_state update (version: {})", node, clusterStateVersion); - } else { - this.lastFailure = e; - logger.debug( - () -> new ParameterizedMessage( - "ack received from node [{}], cluster_state update (version: {})", - node, - clusterStateVersion - ), - e - ); - } - - if (countDown.countDown()) { - finish(); - } - } - - private void finish() { - logger.trace("all expected nodes acknowledged cluster_state update (version: {})", clusterStateVersion); - if (ackTimeoutCallback != null) { - ackTimeoutCallback.cancel(); - } - ackedTaskListener.onAllNodesAcked(lastFailure); - } - - public void onTimeout() { - if (countDown.fastForward()) { - logger.trace("timeout waiting for acknowledgement for cluster_state update (version: {})", clusterStateVersion); - ackedTaskListener.onAckTimeout(); - } - } - } - - private ClusterTasksResult executeTasks(TaskInputs taskInputs, ClusterState previousClusterState, String taskSummary) { - ClusterTasksResult clusterTasksResult; - try { - List inputs = taskInputs.updateTasks.stream().map(tUpdateTask -> tUpdateTask.task).collect(Collectors.toList()); - clusterTasksResult = taskInputs.executor.execute(previousClusterState, inputs); - if (previousClusterState != clusterTasksResult.resultingState - && previousClusterState.nodes().isLocalNodeElectedClusterManager() - && (clusterTasksResult.resultingState.nodes().isLocalNodeElectedClusterManager() == false)) { - throw new AssertionError("update task submitted to ClusterManagerService cannot remove cluster-manager"); - } - } catch (Exception e) { - logger.trace( - () -> new ParameterizedMessage( - "failed to execute cluster state update (on version: [{}], uuid: [{}]) for [{}]\n{}{}{}", - previousClusterState.version(), - previousClusterState.stateUUID(), - taskSummary, - previousClusterState.nodes(), - previousClusterState.routingTable(), - previousClusterState.getRoutingNodes() - ), // may be expensive => construct message lazily - e - ); - clusterTasksResult = ClusterTasksResult.builder() - .failures(taskInputs.updateTasks.stream().map(updateTask -> updateTask.task)::iterator, e) - .build(previousClusterState); - } - - assert clusterTasksResult.executionResults != null; - assert clusterTasksResult.executionResults.size() == taskInputs.updateTasks.size() : String.format( - Locale.ROOT, - "expected [%d] task result%s but was [%d]", - taskInputs.updateTasks.size(), - taskInputs.updateTasks.size() == 1 ? 
"" : "s", - clusterTasksResult.executionResults.size() - ); - if (Assertions.ENABLED) { - ClusterTasksResult finalClusterTasksResult = clusterTasksResult; - taskInputs.updateTasks.forEach(updateTask -> { - assert finalClusterTasksResult.executionResults.containsKey(updateTask.task) : "missing task result for " + updateTask; - }); - } - - return clusterTasksResult; - } - - private List getNonFailedTasks(TaskInputs taskInputs, ClusterTasksResult clusterTasksResult) { - return taskInputs.updateTasks.stream().filter(updateTask -> { - assert clusterTasksResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask; - final ClusterStateTaskExecutor.TaskResult taskResult = clusterTasksResult.executionResults.get(updateTask.task); - return taskResult.isSuccess(); - }).collect(Collectors.toList()); - } - - /** - * Represents a set of tasks to be processed together with their executor - */ - private class TaskInputs { - - final List updateTasks; - final ClusterStateTaskExecutor executor; - final Function taskSummaryGenerator; - - TaskInputs( - ClusterStateTaskExecutor executor, - List updateTasks, - final Function taskSummaryGenerator - ) { - this.executor = executor; - this.updateTasks = updateTasks; - this.taskSummaryGenerator = taskSummaryGenerator; - } - - boolean runOnlyWhenClusterManager() { - return executor.runOnlyOnClusterManager(); - } - - void onNoLongerClusterManager() { - updateTasks.forEach(task -> task.listener.onNoLongerClusterManager(task.source())); - } - } - - /** - * Functionality for register task key to cluster manager node. - * - * @param taskKey - task key of task - * @param throttlingEnabled - throttling is enabled for task or not i.e does data node perform retries on it or not - * @return throttling task key which needs to be passed while submitting task to cluster manager - */ - public ClusterManagerTaskThrottler.ThrottlingKey registerClusterManagerTask(String taskKey, boolean throttlingEnabled) { - return clusterManagerTaskThrottler.registerClusterManagerTask(taskKey, throttlingEnabled); - } - - /** - * Submits a batch of cluster state update tasks; submitted updates are guaranteed to be processed together, - * potentially with more tasks of the same executor. 
- * - * @param source the source of the cluster state update task - * @param tasks a map of update tasks and their corresponding listeners - * @param config the cluster state update task configuration - * @param executor the cluster state update task executor; tasks - * that share the same executor will be executed - * in batches on this executor - * @param the type of the cluster state update task state - */ - public void submitStateUpdateTasks( - final String source, - final Map tasks, - final ClusterStateTaskConfig config, - final ClusterStateTaskExecutor executor - ) { - if (!lifecycle.started()) { - return; - } - final ThreadContext threadContext = threadPool.getThreadContext(); - final Supplier supplier = threadContext.newRestorableContext(true); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - ThreadContextAccess.doPrivilegedVoid(threadContext::markAsSystemContext); - - List safeTasks = tasks.entrySet() - .stream() - .map(e -> taskBatcher.new UpdateTask(config.priority(), source, e.getKey(), safe(e.getValue(), supplier), executor)) - .collect(Collectors.toList()); - taskBatcher.submitTasks(safeTasks, config.timeout()); - } catch (OpenSearchRejectedExecutionException e) { - // ignore cases where we are shutting down..., there is really nothing interesting - // to be done here... - if (!lifecycle.stoppedOrClosed()) { - throw e; - } - } - } - - public ClusterStateStats getClusterStateStats() { - return stateStats; - } - -} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index f554e6d1dc591..f8c5d8e3b2480 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -412,7 +412,6 @@ public void apply(Settings value, Settings current, Settings previous) { IndexModule.NODE_STORE_ALLOW_MMAP, ClusterApplierService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, ClusterService.USER_DEFINED_METADATA, - ClusterManagerService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, // deprecated ClusterManagerService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, IngestService.MAX_NUMBER_OF_INGEST_PROCESSORS, SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 3a7988bcd2bda..a1c914c69ce21 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -49,8 +49,8 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.service.ClusterManagerService; +import org.opensearch.cluster.service.ClusterManagerServiceTests; import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; -import org.opensearch.cluster.service.MasterServiceTests; import org.opensearch.common.Randomness; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -549,9 +549,11 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ) ); - assertTrue(MasterServiceTests.discoveryState(clusterManagerService).getVotingConfigExclusions().stream().anyMatch(exclusion -> { - return "knownNodeName".equals(exclusion.getNodeName()) &&
"newNodeId".equals(exclusion.getNodeId()); - })); + assertTrue( + ClusterManagerServiceTests.discoveryState(clusterManagerService).getVotingConfigExclusions().stream().anyMatch(exclusion -> { + return "knownNodeName".equals(exclusion.getNodeName()) && "newNodeId".equals(exclusion.getNodeId()); + }) + ); } private ClusterState buildStateWithVotingConfigExclusion( @@ -777,7 +779,7 @@ public void testConcurrentJoining() { throw new RuntimeException(e); } - assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); + assertTrue(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); @@ -861,11 +863,11 @@ public void testJoinFailsWhenDecommissioned() { } private boolean isLocalNodeElectedMaster() { - return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); + return ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } private boolean clusterStateHasNode(DiscoveryNode node) { - return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); + return node.equals(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } private static ClusterState initialStateWithDecommissionedAttribute( diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java similarity index 92% rename from server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java rename to server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java index bb9e34d93431f..d1b06d24cc797 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java @@ -108,14 +108,14 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -public class MasterServiceTests extends OpenSearchTestCase { +public class ClusterManagerServiceTests extends OpenSearchTestCase { private static ThreadPool threadPool; private static long timeDiffInMillis; @BeforeClass public static void createThreadPool() { - threadPool = new TestThreadPool(MasterServiceTests.class.getName()) { + threadPool = new TestThreadPool(ClusterManagerServiceTests.class.getName()) { @Override public long preciseRelativeTimeInNanos() { return timeDiffInMillis * TimeValue.NSEC_PER_MSEC; @@ -149,7 +149,7 @@ private ClusterManagerService createClusterManagerService( if (metricsRegistryOptional != null && metricsRegistryOptional.isPresent()) { clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -159,7 +159,7 @@ private ClusterManagerService createClusterManagerService( } else { clusterManagerService = new ClusterManagerService( Settings.builder() - 
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -167,7 +167,7 @@ private ClusterManagerService createClusterManagerService( ); } - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) .nodes( DiscoveryNodes.builder() .add(localNode) @@ -377,11 +377,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:TRACE", reason = "to ensure that we log cluster state events on TRACE level") public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "executing cluster state update for [test1]" ) @@ -389,7 +389,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [1s] to compute cluster state update for [test1]" ) @@ -397,7 +397,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test1]" ) @@ -406,7 +406,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "executing cluster state update for [test2]" ) @@ -414,7 +414,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 failure", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "failed to execute cluster state update (on version: [*], uuid: [*]) for [test2]*" ) @@ -422,7 +422,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [2s] to compute cluster state update for [test2]" ) @@ -430,7 +430,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 notification", - MasterService.class.getCanonicalName(), + 
ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [0s] to notify listeners on unchanged cluster state for [test2]" ) @@ -439,7 +439,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "executing cluster state update for [test3]" ) @@ -447,7 +447,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [3s] to compute cluster state update for [test3]" ) @@ -455,7 +455,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [test3]" ) @@ -464,7 +464,7 @@ public void testClusterStateUpdateLoggingWithTraceEnabled() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.TRACE, "executing cluster state update for [test4]" ) @@ -542,96 +542,96 @@ public void onFailure(String source, Exception e) { @TestLogging(value = "org.opensearch.cluster.service:DEBUG", reason = "to ensure that we log cluster state events on DEBUG level") public void testClusterStateUpdateLoggingWithDebugEnabled() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test1]" + "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test1]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [1s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test1]" + "took [1s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test1]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [0s] to notify listeners on unchanged cluster state for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test1]" + "took [0s] to notify listeners on unchanged cluster state for 
[Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test1]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test2]" + "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test2]" ) ); mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test2 failure", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "failed to execute cluster state update (on version: [*], uuid: [*]) for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test2]*" + "failed to execute cluster state update (on version: [*], uuid: [*]) for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test2]*" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [2s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test2]" + "took [2s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test2]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [0s] to notify listeners on unchanged cluster state for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test2]" + "took [0s] to notify listeners on unchanged cluster state for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test2]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 start", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test3]" + "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test3]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 computation", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [3s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test3]" + "took [3s] to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test3]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3 notification", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "took [4s] to notify listeners on 
successful publication of cluster state (version: *, uuid: *) for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test3]" + "took [4s] to notify listeners on successful publication of cluster state (version: *, uuid: *) for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test3]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.DEBUG, - "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test4]" + "executing cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test4]" ) ); @@ -909,13 +909,13 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } public void testThrottlingForTaskSubmission() throws InterruptedException { - MasterService masterService = createClusterManagerService(true); + ClusterManagerService clusterManagerService = createClusterManagerService(true); int throttlingLimit = randomIntBetween(1, 10); int taskId = 1; final CyclicBarrier barrier = new CyclicBarrier(2); final CountDownLatch latch = new CountDownLatch(1); final String taskName = "test"; - ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = masterService.registerClusterManagerTask(taskName, true); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey = clusterManagerService.registerClusterManagerTask(taskName, true); class Task { private final int id; @@ -945,7 +945,7 @@ public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) { } } - masterService.clusterManagerTaskThrottler.updateLimit(taskName, throttlingLimit); + clusterManagerService.clusterManagerTaskThrottler.updateLimit(taskName, throttlingLimit); final ClusterStateTaskListener listener = new ClusterStateTaskListener() { @Override @@ -958,7 +958,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS TaskExecutor executor = new TaskExecutor(); // submit one task which will be executing; after that, submit throttlingLimit tasks. try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( taskName, new Task(taskId++), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -973,7 +973,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS for (int i = 0; i < throttlingLimit; i++) { try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( taskName, new Task(taskId++), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -988,7 +988,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS // we have one task in execution and tasks in queue, so the next task should be throttled.
final AtomicReference assertionRef = new AtomicReference<>(); try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( taskName, new Task(taskId++), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -999,11 +999,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS assertionRef.set(e); } assertNotNull(assertionRef.get()); - masterService.close(); + clusterManagerService.close(); } public void testThrottlingForMultipleTaskTypes() throws InterruptedException { - MasterService masterService = createClusterManagerService(true); + ClusterManagerService clusterManagerService = createClusterManagerService(true); int throttlingLimitForTask1 = randomIntBetween(1, 5); int throttlingLimitForTask2 = randomIntBetween(1, 5); int throttlingLimitForTask3 = randomIntBetween(1, 5); @@ -1014,9 +1014,9 @@ public void testThrottlingForMultipleTaskTypes() throws InterruptedException { String task2 = "Task2"; String task3 = "Task3"; - ClusterManagerTaskThrottler.ThrottlingKey throttlingKey1 = masterService.registerClusterManagerTask(task1, true); - ClusterManagerTaskThrottler.ThrottlingKey throttlingKey2 = masterService.registerClusterManagerTask(task2, true); - ClusterManagerTaskThrottler.ThrottlingKey throttlingKey3 = masterService.registerClusterManagerTask(task3, true); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey1 = clusterManagerService.registerClusterManagerTask(task1, true); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey2 = clusterManagerService.registerClusterManagerTask(task2, true); + ClusterManagerTaskThrottler.ThrottlingKey throttlingKey3 = clusterManagerService.registerClusterManagerTask(task3, true); class Task {} class Task1 extends Task {} class Task2 extends Task {} @@ -1071,8 +1071,8 @@ public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey( } // configuring limits for Task1 and Task3. All task submissions of Task2 should pass.
- masterService.clusterManagerTaskThrottler.updateLimit(task1, throttlingLimitForTask1); - masterService.clusterManagerTaskThrottler.updateLimit(task3, throttlingLimitForTask3); + clusterManagerService.clusterManagerTaskThrottler.updateLimit(task1, throttlingLimitForTask1); + clusterManagerService.clusterManagerTaskThrottler.updateLimit(task3, throttlingLimitForTask3); final CountDownLatch latch = new CountDownLatch(numberOfTask1 + numberOfTask2 + numberOfTask3); AtomicInteger throttledTask1 = new AtomicInteger(); AtomicInteger throttledTask2 = new AtomicInteger(); @@ -1112,7 +1112,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void run() { try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( task1, new Task1(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -1132,7 +1132,7 @@ public void run() { @Override public void run() { try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( task2, new Task2(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -1151,7 +1151,7 @@ public void run() { @Override public void run() { try { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( task3, new Task3(), ClusterStateTaskConfig.build(randomFrom(Priority.values()), new TimeValue(0)), @@ -1175,7 +1175,7 @@ public void run() { assertEquals(numberOfTask2, succeededTask2.get()); assertEquals(0, throttledTask2.get()); assertEquals(numberOfTask3, throttledTask3.get() + timedOutTask3.get() + succeededTask3.get()); - masterService.close(); + clusterManagerService.close(); } public void testBlockingCallInClusterStateTaskListenerFails() throws InterruptedException { @@ -1224,11 +1224,11 @@ public void onFailure(String source, Exception e) {} @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log cluster state events on WARN level") public void testLongClusterStateUpdateLogging() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't log because it was fast enough", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took*test1*" ) @@ -1236,31 +1236,31 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, - "*took [*], which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test2]" + "*took [*], which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test2]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, - "*took [*], which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test3]" + "*took [*], 
which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test3]" ) ); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, - "*took [*], which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test4]" + "*took [*], which is over [10s], to compute cluster state update for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test4]" ) ); mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test5 should not log despite publishing slowly", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, "*took*test5*" ) @@ -1269,7 +1269,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { try ( ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -1285,7 +1285,9 @@ public void testLongClusterStateUpdateLogging() throws Exception { emptySet(), Version.CURRENT ); - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder( + new ClusterName(ClusterManagerServiceTests.class.getSimpleName()) + ) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); @@ -1426,20 +1428,20 @@ public void onFailure(String source, Exception e) { @TestLogging(value = "org.opensearch.cluster.service:WARN", reason = "to ensure that we log failed cluster state events on WARN level") public void testLongClusterStateUpdateLoggingForFailedPublication() throws Exception { - try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(MasterService.class))) { + try (MockLogAppender mockAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ClusterManagerService.class))) { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1 should log due to slow and failing publication", - MasterService.class.getCanonicalName(), + ClusterManagerService.class.getCanonicalName(), Level.WARN, - "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [Tasks batched with key: org.opensearch.cluster.service.MasterServiceTests, count:1 and sample tasks: test1]:*" + "took [*] and then failed to publish updated cluster state (version: *, uuid: *) for [Tasks batched with key: org.opensearch.cluster.service.ClusterManagerServiceTests, count:1 and sample tasks: test1]:*" ) ); try ( ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) 
.put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -1455,7 +1457,9 @@ public void testLongClusterStateUpdateLoggingForFailedPublication() throws Excep emptySet(), Version.CURRENT ); - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder( + new ClusterName(ClusterManagerServiceTests.class.getSimpleName()) + ) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); @@ -1529,7 +1533,7 @@ public void testAcking() throws InterruptedException { try ( ClusterManagerService clusterManagerService = new ClusterManagerService( Settings.builder() - .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterManagerServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") .build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), @@ -1538,7 +1542,7 @@ public void testAcking() throws InterruptedException { ) ) { - final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) + final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName())) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId())) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); @@ -1658,16 +1662,13 @@ public void onAckTimeout() { verify(clusterStatePublishHistogram, times(1)).record(anyDouble()); } - public void testDeprecatedMasterServiceUpdateTaskThreadName() { - Thread.currentThread().setName(MasterService.MASTER_UPDATE_THREAD_NAME); - assertThat(MasterService.assertClusterManagerUpdateThread(), is(Boolean.TRUE)); - assertThrows(AssertionError.class, () -> MasterService.assertNotClusterManagerUpdateThread("test")); - Thread.currentThread().setName(MasterService.CLUSTER_MANAGER_UPDATE_THREAD_NAME); - assertThat(MasterService.assertClusterManagerUpdateThread(), is(Boolean.TRUE)); - assertThrows(AssertionError.class, () -> MasterService.assertNotClusterManagerUpdateThread("test")); + public void testUpdateTaskThreadName() { + Thread.currentThread().setName(ClusterManagerService.CLUSTER_MANAGER_UPDATE_THREAD_NAME); + assertThat(ClusterManagerService.assertClusterManagerUpdateThread(), is(Boolean.TRUE)); + assertThrows(AssertionError.class, () -> ClusterManagerService.assertNotClusterManagerUpdateThread("test")); Thread.currentThread().setName("test not cluster manager update thread"); - assertThat(MasterService.assertNotClusterManagerUpdateThread("test"), is(Boolean.TRUE)); - assertThrows(AssertionError.class, () -> MasterService.assertClusterManagerUpdateThread()); + assertThat(ClusterManagerService.assertNotClusterManagerUpdateThread("test"), is(Boolean.TRUE)); + assertThrows(AssertionError.class, () -> ClusterManagerService.assertClusterManagerUpdateThread()); } @Timeout(millis = 5_000) diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index 3bd9333dc4168..c536ce2597fd7 100644 --- 
a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -90,7 +90,7 @@ public void testValidateSettingsForDifferentVersion() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", true); @@ -120,7 +120,7 @@ public void testValidateSettingsForTaskWithoutRetryOnDataNode() { ClusterSettings clusterSettings = new ClusterSettings(Settings.builder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterManagerTaskThrottler throttler = new ClusterManagerTaskThrottler(Settings.EMPTY, clusterSettings, () -> { - return clusterService.getMasterService().getMinNodeVersion(); + return clusterService.getClusterManagerService().getMinNodeVersion(); }, new ClusterManagerThrottlingStats()); throttler.registerClusterManagerTask("put-mapping", false); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java deleted file mode 100644 index 4d88683826af7..0000000000000 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterServiceTests.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.cluster.service; - -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; -import org.junit.After; - -import static org.hamcrest.Matchers.equalTo; - -public class ClusterServiceTests extends OpenSearchTestCase { - private final TestThreadPool threadPool = new TestThreadPool(ClusterServiceTests.class.getName()); - - @After - public void terminateThreadPool() { - terminate(threadPool); - } - - public void testDeprecatedGetMasterServiceBWC() { - try ( - ClusterService clusterService = new ClusterService( - Settings.EMPTY, - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - threadPool - ) - ) { - MasterService masterService = clusterService.getMasterService(); - ClusterManagerService clusterManagerService = clusterService.getClusterManagerService(); - assertThat(masterService, equalTo(clusterManagerService)); - } - } -} diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java deleted file mode 100644 index acf089dc43b56..0000000000000 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceRenamedSettingTests.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.cluster.service; - -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Arrays; -import java.util.Set; - -/** - * A unit test to validate the former name of the setting 'cluster.service.slow_cluster_manager_task_logging_threshold' still take effect, - * after it is deprecated, so that the backwards compatibility is maintained. - * The test can be removed along with removing support of the deprecated setting. - */ -public class MasterServiceRenamedSettingTests extends OpenSearchTestCase { - - /** - * Validate the both settings are known and supported. - */ - public void testClusterManagerServiceSettingsExist() { - Set> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; - assertTrue( - "Both 'cluster.service.slow_cluster_manager_task_logging_threshold' and its predecessor should be supported built-in settings", - settings.containsAll( - Arrays.asList( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING - ) - ) - ); - } - - /** - * Validate the default value of the both settings is the same. - */ - public void testSettingFallback() { - assertEquals( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(Settings.EMPTY) - ); - } - - /** - * Validate the new setting can be configured correctly, and it doesn't impact the old setting. - */ - public void testSettingGetValue() { - Settings settings = Settings.builder().put("cluster.service.slow_cluster_manager_task_logging_threshold", "9s").build(); - assertEquals( - TimeValue.timeValueSeconds(9), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) - ); - assertEquals( - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getDefault(Settings.EMPTY), - MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) - - ); - } - - /** - * Validate the value of the old setting will be applied to the new setting, if the new setting is not configured. - */ - public void testSettingGetValueWithFallback() { - Settings settings = Settings.builder().put("cluster.service.slow_master_task_logging_threshold", "8s").build(); - assertEquals( - TimeValue.timeValueSeconds(8), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) - - ); - assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); - } - - /** - * Validate the value of the old setting will be ignored, if the new setting is configured. 
- */ - public void testSettingGetValueWhenBothAreConfigured() { - Settings settings = Settings.builder() - .put("cluster.service.slow_cluster_manager_task_logging_threshold", "9s") - .put("cluster.service.slow_master_task_logging_threshold", "8s") - .build(); - assertEquals( - TimeValue.timeValueSeconds(9), - MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings) - - ); - assertEquals(TimeValue.timeValueSeconds(8), MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings)); - assertSettingDeprecationsAndWarnings(new Setting[] { MasterService.MASTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING }); - } - -} diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 3efcc538a1b25..836a4cbffd54f 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -567,7 +567,7 @@ void stabilise(long stabilisationDurationMillis) { final ClusterNode leader = getAnyLeader(); final long leaderTerm = leader.coordinator.getCurrentTerm(); - final int pendingTaskCount = leader.clusterManagerService.getFakeMasterServicePendingTaskCount(); + final int pendingTaskCount = leader.clusterManagerService.getFakeClusterManagerServicePendingTaskCount(); runFor((pendingTaskCount + 1) * DEFAULT_CLUSTER_STATE_UPDATE_DELAY, "draining task queue"); final Matcher isEqualToLeaderVersion = equalTo(leader.coordinator.getLastAcceptedState().getVersion()); diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java index 64f3dbc4fd967..853d2d77a6fab 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolClusterManagerService.java @@ -110,7 +110,7 @@ public void execute(Runnable command) { }; } - public int getFakeMasterServicePendingTaskCount() { + public int getFakeClusterManagerServicePendingTaskCount() { return pendingTasks.size(); } diff --git a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java b/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java deleted file mode 100644 index 0713432c00189..0000000000000 --- a/test/framework/src/main/java/org/opensearch/cluster/service/FakeThreadPoolMasterService.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.cluster.service; - -import org.opensearch.threadpool.ThreadPool; - -import java.util.function.Consumer; - -/** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link FakeThreadPoolClusterManagerService} - */ -@Deprecated -public class FakeThreadPoolMasterService extends FakeThreadPoolClusterManagerService { - public FakeThreadPoolMasterService( - String nodeName, - String serviceName, - ThreadPool threadPool, - Consumer onTaskAvailableToRun - ) { - super(nodeName, serviceName, threadPool, onTaskAvailableToRun); - } -} diff --git a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java index f0c0e9bc2d589..6ca116a4e3a65 100644 --- a/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java +++ b/test/framework/src/main/java/org/opensearch/test/ClusterServiceUtils.java @@ -49,7 +49,6 @@ import org.opensearch.cluster.service.ClusterApplierService; import org.opensearch.cluster.service.ClusterManagerService; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.cluster.service.MasterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.node.Node; @@ -90,18 +89,6 @@ public static ClusterManagerService createClusterManagerService(ThreadPool threa return createClusterManagerService(threadPool, initialClusterState); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #createClusterManagerService(ThreadPool, ClusterState)} */ - @Deprecated - public static MasterService createMasterService(ThreadPool threadPool, ClusterState initialClusterState) { - return createClusterManagerService(threadPool, initialClusterState); - } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #createClusterManagerService(ThreadPool, DiscoveryNode)} */ - @Deprecated - public static MasterService createMasterService(ThreadPool threadPool, DiscoveryNode localNode) { - return createClusterManagerService(threadPool, localNode); - } - public static void setState(ClusterApplierService executor, ClusterState clusterState) { CountDownLatch latch = new CountDownLatch(1); AtomicReference exception = new AtomicReference<>(); diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java b/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java deleted file mode 100644 index bbe99d838f296..0000000000000 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BlockMasterServiceOnMaster.java +++ /dev/null @@ -1,21 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.test.disruption; - -import java.util.Random; - -/** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link BlockClusterManagerServiceOnClusterManager} - */ -@Deprecated -public class BlockMasterServiceOnMaster extends BlockClusterManagerServiceOnClusterManager { - public BlockMasterServiceOnMaster(Random random) { - super(random); - } -} diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java b/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java deleted file mode 100644 index 884997123e6a4..0000000000000 --- a/test/framework/src/main/java/org/opensearch/test/disruption/BusyMasterServiceDisruption.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.test.disruption; - -import org.opensearch.common.Priority; - -import java.util.Random; - -/** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link BusyClusterManagerServiceDisruption} - */ -@Deprecated -public class BusyMasterServiceDisruption extends BusyClusterManagerServiceDisruption { - public BusyMasterServiceDisruption(Random random, Priority priority) { - super(random, priority); - } -} From de59264d70878455be2b98ac2eea4e46753e0944 Mon Sep 17 00:00:00 2001 From: Finn Date: Tue, 28 Jan 2025 12:56:50 -0800 Subject: [PATCH 23/48] Fix auto date histogram rounding assertion bug (#17023) * Add comments explanations for auto date histo increaseRoundingIfNeeded. Signed-off-by: Finn Carroll * Add testFilterRewriteWithTZRoundingRangeAssert() to reproduce auto date histo assertion bug per #16932 Signed-off-by: Finn Carroll * Fix #16932. Ensure optimized path can only increase preparedRounding of agg. Signed-off-by: Finn Carroll * Spotless apply Signed-off-by: Finn Carroll * Fast fail filter rewrite opt in data histo aggs for non UTC timezones Signed-off-by: Finn Carroll * Remove redundant UTC check from getInterval(). Signed-off-by: Finn Carroll * Save a call to prepareRounding if roundingIdx is unchanged. Signed-off-by: Finn Carroll * Spotless apply Signed-off-by: Finn Carroll * Changelog Signed-off-by: Finn Carroll * Add ZoneId getter for date histo filter rewrite canOptimize check. Signed-off-by: Finn Carroll * Spotless apply Signed-off-by: Finn Carroll * Disable ff optimzation for composite agg in canOptimize. Signed-off-by: Finn Carroll * Spotless apply Signed-off-by: Finn Carroll * Handle utc timezone check Signed-off-by: bowenlan-amzn * Remove redundant timeZone getter. Signed-off-by: Finn Carroll * Simplify ff prepared rounding check. 
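A note on the fix itself: across the segments of a shard, the auto date histogram may only move to a coarser prepared rounding, never back to a finer one, since buckets already collected at a coarse granularity cannot be split again. The sketch below is a self-contained illustration of that guard only; the class and field names (MonotonicRounding, intervalMillis) are hypothetical and not part of the patch.

    // Hedged sketch of the invariant this patch restores: the selected rounding
    // index is non-decreasing while a shard's segments are processed.
    final class MonotonicRounding {
        private final long[] intervalMillis; // ordered smallest -> largest, e.g. minute, hour, day
        private int roundingIdx = 0;

        MonotonicRounding(long[] intervalMillis) {
            this.intervalMillis = intervalMillis;
        }

        // Pick a rounding that fits [low, high] into targetBuckets without ever
        // shrinking the rounding chosen for an earlier segment.
        long update(long low, long high, int targetBuckets) {
            int idx = 0;
            while (idx < intervalMillis.length - 1 && (high - low) / intervalMillis[idx] > targetBuckets) {
                idx++; // need a coarser interval to stay within targetBuckets
            }
            roundingIdx = Math.max(roundingIdx, idx); // the guard: never decrease
            return intervalMillis[roundingIdx];
        }
    }

The same guard appears in getRounding() in the diff below as the check roundingIdx > prevRoundingIdx, which also skips a redundant prepareRounding() call when the index is unchanged.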
Signed-off-by: Finn Carroll --------- Signed-off-by: Finn Carroll Signed-off-by: bowenlan-amzn Co-authored-by: bowenlan-amzn --- CHANGELOG.md | 1 + .../java/org/opensearch/common/Rounding.java | 27 +++++---- .../org/opensearch/search/DocValueFormat.java | 4 ++ .../bucket/composite/CompositeAggregator.java | 8 +++ .../DateHistogramAggregatorBridge.java | 10 +++- .../AutoDateHistogramAggregator.java | 50 +++++++++++++++-- .../histogram/DateHistogramAggregator.java | 2 +- .../AutoDateHistogramAggregatorTests.java | 55 +++++++++++++++++++ .../aggregations/AggregatorTestCase.java | 22 ++++++-- 9 files changed, 157 insertions(+), 22 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 239ef81f062f7..cecc85c46c006 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -117,6 +117,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) +- Fix AutoDateHistogramAggregator rounding assertion failure ([#17023](https://github.com/opensearch-project/OpenSearch/pull/17023)) ### Security diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index 5d1251e9bed7c..c6fa4915ad05a 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -668,6 +668,11 @@ public String toString() { return "Rounding[" + unit + " in " + timeZone + "]"; } + @Override + public boolean isUTC() { + return "Z".equals(timeZone.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); + } + private abstract class TimeUnitPreparedRounding extends PreparedRounding { @Override public double roundingSize(long utcMillis, DateTimeUnit timeUnit) { @@ -1045,6 +1050,11 @@ public String toString() { return "Rounding[" + interval + " in " + timeZone + "]"; } + @Override + public boolean isUTC() { + return "Z".equals(timeZone.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); + } + private long roundKey(long value, long interval) { if (value < 0) { return (value - interval + 1) / interval; @@ -1364,6 +1374,11 @@ public boolean equals(Object obj) { public String toString() { return delegate + " offset by " + offset; } + + @Override + public boolean isUTC() { + return delegate.isUTC(); + } } public static Rounding read(StreamInput in) throws IOException { @@ -1391,16 +1406,8 @@ public static OptionalLong getInterval(Rounding rounding) { if (rounding instanceof TimeUnitRounding) { interval = (((TimeUnitRounding) rounding).unit).extraLocalOffsetLookup(); - if (!isUTCTimeZone(((TimeUnitRounding) rounding).timeZone)) { - // Fast filter aggregation cannot be used if it needs time zone rounding - return OptionalLong.empty(); - } } else if (rounding instanceof TimeIntervalRounding) { interval = ((TimeIntervalRounding) rounding).interval; - if (!isUTCTimeZone(((TimeIntervalRounding) rounding).timeZone)) { - // Fast filter aggregation cannot be used if it needs time zone rounding - return OptionalLong.empty(); - } } else { return OptionalLong.empty(); } @@ -1412,7 +1419,5 @@ public static OptionalLong getInterval(Rounding rounding) { * Helper function for 
checking if the time zone requested for date histogram * aggregation is utc or not */ - private static boolean isUTCTimeZone(final ZoneId zoneId) { - return "Z".equals(zoneId.getDisplayName(TextStyle.FULL, Locale.ENGLISH)); - } + public abstract boolean isUTC(); } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 9fae14f69b0af..d2a627eda1d15 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -286,6 +286,10 @@ public DateMathParser getDateMathParser() { return parser; } + public ZoneId getZoneId() { + return timeZone; + } + @Override public String format(long value) { return formatter.format(resolution.toInstant(value).atZone(timeZone)); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 7a0bffa0cf74a..7f5e23a3307ed 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -182,6 +182,14 @@ protected boolean canOptimize() { }); } + /** + * The filter rewrite optimized path does not support bucket intervals which are not fixed. + * For this reason we exclude non UTC timezones. + */ + if (valuesSource.getRounding().isUTC() == false) { + return false; + } + // bucketOrds is used for saving the date histogram results got from the optimization path bucketOrds = LongKeyedBucketOrds.build(context.bigArrays(), CardinalityUpperBound.ONE); return true; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java index c780732a5ddce..50fe6a8cbf69f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/filterrewrite/DateHistogramAggregatorBridge.java @@ -32,7 +32,15 @@ public abstract class DateHistogramAggregatorBridge extends AggregatorBridge { int maxRewriteFilters; - protected boolean canOptimize(ValuesSourceConfig config) { + protected boolean canOptimize(ValuesSourceConfig config, Rounding rounding) { + /** + * The filter rewrite optimized path does not support bucket intervals which are not fixed. + * For this reason we exclude non UTC timezones. 
+ */ + if (rounding.isUTC() == false) { + return false; + } + if (config.script() == null && config.missing() == null) { MappedFieldType fieldType = config.fieldType(); if (fieldType instanceof DateFieldMapper.DateFieldType) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index f3a36b4882d19..cbeb27e8a3e63 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -149,7 +149,6 @@ private AutoDateHistogramAggregator( Aggregator parent, Map metadata ) throws IOException { - super(name, factories, aggregationContext, parent, metadata); this.targetBuckets = targetBuckets; // TODO: Remove null usage here, by using a different aggregator for create @@ -162,7 +161,7 @@ private AutoDateHistogramAggregator( DateHistogramAggregatorBridge bridge = new DateHistogramAggregatorBridge() { @Override protected boolean canOptimize() { - return canOptimize(valuesSourceConfig); + return canOptimize(valuesSourceConfig, roundingInfos[0].rounding); } @Override @@ -170,6 +169,17 @@ protected void prepare() throws IOException { buildRanges(context); } + /** + * The filter rewrite optimization uses this method to pre-emptively update the preparedRounding + * when considering the optimized path for a single segment. This is necessary since the optimized path + * skips doc collection entirely which is where the preparedRounding is normally updated. + * + * @param low lower bound of rounding to prepare + * @param high upper bound of rounding to prepare + * @return select a prepared rounding which satisfies the conditions: + * 1. Is at least as large as our previously prepared rounding + * 2. Must span a range of [low, high] with buckets <= targetBuckets + */ @Override protected Rounding getRounding(final long low, final long high) { // max - min / targetBuckets = bestDuration @@ -177,7 +187,8 @@ protected Rounding getRounding(final long low, final long high) { // since we cannot exceed targetBuckets, bestDuration should go up, // so the right innerInterval should be an upper bound long bestDuration = (high - low) / targetBuckets; - // reset so this function is idempotent + + int prevRoundingIdx = roundingIdx; roundingIdx = 0; while (roundingIdx < roundingInfos.length - 1) { final RoundingInfo curRoundingInfo = roundingInfos[roundingIdx]; @@ -190,7 +201,11 @@ protected Rounding getRounding(final long low, final long high) { roundingIdx++; } - preparedRounding = prepareRounding(roundingIdx); + // Ensure preparedRounding never shrinks + if (roundingIdx > prevRoundingIdx) { + preparedRounding = prepareRounding(roundingIdx); + } + return roundingInfos[roundingIdx].rounding; } @@ -403,12 +418,39 @@ private void collectValue(int doc, long rounded) throws IOException { increaseRoundingIfNeeded(rounded); } + /** + * Examine our current bucket count and the most recently added bucket to determine if an update to + * preparedRounding is required to keep total bucket count in compliance with targetBuckets. 
+ * + * @param rounded the most recently collected value rounded + */ private void increaseRoundingIfNeeded(long rounded) { + // If we are already using the rounding with the largest interval nothing can be done if (roundingIdx >= roundingInfos.length - 1) { return; } + + // Re calculate the max and min values we expect to bucket according to most recently rounded val min = Math.min(min, rounded); max = Math.max(max, rounded); + + /** + * Quick explanation of the two below conditions: + * + * 1. [targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval()] + * Represents the total bucket count possible before we will exceed targetBuckets + * even if we use the maximum inner interval of our current rounding. For example, consider the + * DAYS_OF_MONTH rounding where the maximum inner interval is 7 days (i.e. 1 week buckets). + * targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval() would then be the number of + * 1 day buckets possible such that if we re-bucket to 1 week buckets we will have more 1 week buckets + * than our targetBuckets limit. If the current count of buckets exceeds this limit we must update + * our rounding. + * + * 2. [targetBuckets * roundingInfos[roundingIdx].getMaximumRoughEstimateDurationMillis()] + * The total duration of ms covered by our current rounding. In the case of MINUTES_OF_HOUR rounding + * getMaximumRoughEstimateDurationMillis is 60000. If our current total range in millis (max - min) + * exceeds this range we must update our rounding. + */ if (bucketOrds.size() <= targetBuckets * roundingInfos[roundingIdx].getMaximumInnerInterval() && max - min <= targetBuckets * roundingInfos[roundingIdx].getMaximumRoughEstimateDurationMillis()) { return; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 23fbacc979224..49672831625e4 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -143,7 +143,7 @@ class DateHistogramAggregator extends BucketsAggregator implements SizedBucketAg DateHistogramAggregatorBridge bridge = new DateHistogramAggregatorBridge() { @Override protected boolean canOptimize() { - return canOptimize(valuesSourceConfig); + return canOptimize(valuesSourceConfig, rounding); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index dda053af78b30..95f56d779b088 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -38,7 +38,9 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; @@ -72,6 +74,7 @@ import java.time.Instant; 
import java.time.LocalDate; import java.time.YearMonth; +import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; @@ -912,6 +915,58 @@ public void testWithPipelineReductions() throws IOException { ); } + // Bugfix: https://github.com/opensearch-project/OpenSearch/issues/16932 + public void testFilterRewriteWithTZRoundingRangeAssert() throws IOException { + /* + multiBucketIndexData must overlap with DST to produce a 'LinkedListLookup' prepared rounding. + This lookup rounding style maintains a strict max/min input range and will assert each value is in range. + */ + final List multiBucketIndexData = Arrays.asList( + ZonedDateTime.of(2023, 10, 10, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2023, 11, 11, 0, 0, 0, 0, ZoneOffset.UTC) + ); + + final List singleBucketIndexData = Arrays.asList(ZonedDateTime.of(2023, 12, 27, 0, 0, 0, 0, ZoneOffset.UTC)); + + try (Directory directory = newDirectory()) { + /* + Ensure we produce two segments on one shard such that the documents in seg 1 will be out of range of the + prepared rounding produced by the filter rewrite optimization considering seg 2 for optimized path. + */ + IndexWriterConfig c = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE); + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, c)) { + indexSampleData(multiBucketIndexData, indexWriter); + indexWriter.flush(); + indexSampleData(singleBucketIndexData, indexWriter); + } + + try (IndexReader indexReader = DirectoryReader.open(directory)) { + final IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + // Force agg to update rounding when it begins collecting from the second segment. + final AutoDateHistogramAggregationBuilder aggregationBuilder = new AutoDateHistogramAggregationBuilder("_name"); + aggregationBuilder.setNumBuckets(3).field(DATE_FIELD).timeZone(ZoneId.of("America/New_York")); + + Map expectedDocCount = new TreeMap<>(); + expectedDocCount.put("2023-10-01T00:00:00.000-04:00", 1); + expectedDocCount.put("2023-11-01T00:00:00.000-04:00", 1); + expectedDocCount.put("2023-12-01T00:00:00.000-05:00", 1); + + final InternalAutoDateHistogram histogram = searchAndReduce( + indexSearcher, + DEFAULT_QUERY, + aggregationBuilder, + false, + new DateFieldMapper.DateFieldType(aggregationBuilder.field()), + new NumberFieldMapper.NumberFieldType(INSTANT_FIELD, NumberFieldMapper.NumberType.LONG), + new NumberFieldMapper.NumberFieldType(NUMERIC_FIELD, NumberFieldMapper.NumberType.LONG) + ); + + assertThat(bucketCountsAsMap(histogram), equalTo(expectedDocCount)); + } + } + } + @Override protected IndexSettings createIndexSettings() { final Settings nodeSettings = Settings.builder().put("search.max_buckets", 25000).build(); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index 7ba2f1284d551..b982665e01d8a 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -611,9 +611,19 @@ protected A searchAndReduc IndexSearcher searcher, Query query, AggregationBuilder builder, + boolean shardFanOut, MappedFieldType... 
fieldTypes ) throws IOException { - return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes); + return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, shardFanOut, fieldTypes); + } + + protected A searchAndReduce( + IndexSearcher searcher, + Query query, + AggregationBuilder builder, + MappedFieldType... fieldTypes + ) throws IOException { + return searchAndReduce(createIndexSettings(), searcher, query, builder, DEFAULT_MAX_BUCKETS, randomBoolean(), fieldTypes); } protected A searchAndReduce( @@ -623,7 +633,7 @@ protected A searchAndReduc AggregationBuilder builder, MappedFieldType... fieldTypes ) throws IOException { - return searchAndReduce(indexSettings, searcher, query, builder, DEFAULT_MAX_BUCKETS, fieldTypes); + return searchAndReduce(indexSettings, searcher, query, builder, DEFAULT_MAX_BUCKETS, randomBoolean(), fieldTypes); } protected A searchAndReduce( @@ -633,7 +643,7 @@ protected A searchAndReduc int maxBucket, MappedFieldType... fieldTypes ) throws IOException { - return searchAndReduce(createIndexSettings(), searcher, query, builder, maxBucket, fieldTypes); + return searchAndReduce(createIndexSettings(), searcher, query, builder, maxBucket, randomBoolean(), fieldTypes); } protected A searchAndReduce( @@ -642,9 +652,10 @@ protected A searchAndReduc Query query, AggregationBuilder builder, int maxBucket, + boolean shardFanOut, MappedFieldType... fieldTypes ) throws IOException { - return searchAndReduce(indexSettings, searcher, query, builder, maxBucket, false, fieldTypes); + return searchAndReduce(indexSettings, searcher, query, builder, maxBucket, false, shardFanOut, fieldTypes); } /** @@ -662,6 +673,7 @@ protected A searchAndReduc AggregationBuilder builder, int maxBucket, boolean hasNested, + boolean shardFanOut, MappedFieldType... 
fieldTypes ) throws IOException { final IndexReaderContext ctx = searcher.getTopReaderContext(); @@ -677,7 +689,7 @@ protected A searchAndReduc ); C root = createAggregator(query, builder, searcher, bucketConsumer, fieldTypes); - if (randomBoolean() && searcher.getIndexReader().leaves().size() > 0) { + if (shardFanOut && searcher.getIndexReader().leaves().size() > 0) { assertThat(ctx, instanceOf(CompositeReaderContext.class)); final CompositeReaderContext compCTX = (CompositeReaderContext) ctx; final int size = compCTX.leaves().size(); From c5ab16264d5e3624e2cf368f475ff3b5bc6e0bf9 Mon Sep 17 00:00:00 2001 From: Kaushal Kumar Date: Tue, 28 Jan 2025 13:00:07 -0800 Subject: [PATCH 24/48] [WLM] Add wlm support for scroll API (#16981) * add wlm support for scroll API Signed-off-by: Kaushal Kumar * add CHANGELOG entry Signed-off-by: Kaushal Kumar * remove untagged tasks from WLM tracking Signed-off-by: Kaushal Kumar * add UTs for invalid tasks Signed-off-by: Kaushal Kumar * fix UT failures Signed-off-by: Kaushal Kumar * rename a field in QueryGroupTask Signed-off-by: Kaushal Kumar --------- Signed-off-by: Kaushal Kumar --- CHANGELOG.md | 1 + .../search/TransportSearchScrollAction.java | 12 +++- .../org/opensearch/wlm/QueryGroupTask.java | 6 ++ ...QueryGroupResourceUsageTrackerService.java | 1 + .../QueryGroupTaskResourceTrackingTests.java | 66 +++++++++++++++++++ ...rceUsageCalculatorTrackerServiceTests.java | 1 + 6 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index cecc85c46c006..0200f7640bc1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -115,6 +115,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) - Stop processing search requests when _msearch request is cancelled ([#17005](https://github.com/opensearch-project/OpenSearch/pull/17005)) - Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) +- [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) - Fix AutoDateHistogramAggregator rounding assertion failure ([#17023](https://github.com/opensearch-project/OpenSearch/pull/17023)) diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java index 4713d03c93bac..01bf5754a42a1 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchScrollAction.java @@ -39,7 +39,9 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import 
org.opensearch.wlm.QueryGroupTask; /** * Perform the search scroll @@ -51,6 +53,7 @@ public class TransportSearchScrollAction extends HandledTransportAction) SearchScrollRequest::new); this.clusterService = clusterService; this.searchTransportService = searchTransportService; this.searchPhaseController = searchPhaseController; + this.threadPool = threadPool; } @Override protected void doExecute(Task task, SearchScrollRequest request, ActionListener listener) { try { + + if (task instanceof QueryGroupTask) { + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + } + ParsedScrollId scrollId = TransportSearchHelper.parseScrollId(request.scrollId()); Runnable action; switch (scrollId.getType()) { diff --git a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java index 97c48bd828978..c6b7fee3b04c0 100644 --- a/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java +++ b/server/src/main/java/org/opensearch/wlm/QueryGroupTask.java @@ -33,6 +33,7 @@ public class QueryGroupTask extends CancellableTask { public static final Supplier DEFAULT_QUERY_GROUP_ID_SUPPLIER = () -> "DEFAULT_QUERY_GROUP"; private final LongSupplier nanoTimeSupplier; private String queryGroupId; + private boolean isQueryGroupSet = false; public QueryGroupTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers) { this(id, type, action, description, parentTaskId, headers, NO_TIMEOUT, System::nanoTime); @@ -81,6 +82,7 @@ public final String getQueryGroupId() { * @param threadContext current threadContext */ public final void setQueryGroupId(final ThreadContext threadContext) { + isQueryGroupSet = true; if (threadContext != null && threadContext.getHeader(QUERY_GROUP_ID_HEADER) != null) { this.queryGroupId = threadContext.getHeader(QUERY_GROUP_ID_HEADER); } else { @@ -92,6 +94,10 @@ public long getElapsedTime() { return nanoTimeSupplier.getAsLong() - getStartTimeNanos(); } + public boolean isQueryGroupSet() { + return isQueryGroupSet; + } + @Override public boolean shouldCancelChildrenOnCancellation() { return false; diff --git a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java index 19f7bf48d8421..71cf3135781dd 100644 --- a/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java +++ b/server/src/main/java/org/opensearch/wlm/tracker/QueryGroupResourceUsageTrackerService.java @@ -76,6 +76,7 @@ private Map> getTasksGroupedByQueryGroup() { .stream() .filter(QueryGroupTask.class::isInstance) .map(QueryGroupTask.class::cast) + .filter(QueryGroupTask::isQueryGroupSet) .collect(Collectors.groupingBy(QueryGroupTask::getQueryGroupId, Collectors.mapping(task -> task, Collectors.toList()))); } } diff --git a/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java b/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java new file mode 100644 index 0000000000000..5d54de3536596 --- /dev/null +++ b/server/src/test/java/org/opensearch/wlm/tracker/QueryGroupTaskResourceTrackingTests.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm.tracker; + +import org.opensearch.action.search.SearchTask; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.tasks.TaskResourceTrackingService; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.wlm.QueryGroupLevelResourceUsageView; +import org.opensearch.wlm.QueryGroupTask; + +import java.util.HashMap; +import java.util.Map; + +public class QueryGroupTaskResourceTrackingTests extends OpenSearchTestCase { + ThreadPool threadPool; + QueryGroupResourceUsageTrackerService queryGroupResourceUsageTrackerService; + TaskResourceTrackingService taskResourceTrackingService; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool("workload-management-tracking-thread-pool"); + taskResourceTrackingService = new TaskResourceTrackingService( + Settings.EMPTY, + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + threadPool + ); + queryGroupResourceUsageTrackerService = new QueryGroupResourceUsageTrackerService(taskResourceTrackingService); + } + + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdownNow(); + } + + public void testValidQueryGroupTasksCase() { + taskResourceTrackingService.setTaskResourceTrackingEnabled(true); + QueryGroupTask task = new SearchTask(1, "test", "test", () -> "Test", TaskId.EMPTY_TASK_ID, new HashMap<>()); + taskResourceTrackingService.startTracking(task); + + // since the query group id is not set we should not track this task + Map resourceUsageViewMap = queryGroupResourceUsageTrackerService + .constructQueryGroupLevelUsageViews(); + assertTrue(resourceUsageViewMap.isEmpty()); + + // Now since this task has a valid queryGroupId header it should be tracked + try (ThreadContext.StoredContext context = threadPool.getThreadContext().stashContext()) { + threadPool.getThreadContext().putHeader(QueryGroupTask.QUERY_GROUP_ID_HEADER, "testHeader"); + task.setQueryGroupId(threadPool.getThreadContext()); + resourceUsageViewMap = queryGroupResourceUsageTrackerService.constructQueryGroupLevelUsageViews(); + assertFalse(resourceUsageViewMap.isEmpty()); + } + } +} diff --git a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java index fe72bd6e710c8..c14ac6a143c95 100644 --- a/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java +++ b/server/src/test/java/org/opensearch/wlm/tracker/ResourceUsageCalculatorTrackerServiceTests.java @@ -146,6 +146,7 @@ private T createMockTask(Class type, long cpuUsage when(task.getTotalResourceUtilization(ResourceStats.MEMORY)).thenReturn(heapUsage); when(task.getStartTimeNanos()).thenReturn((long) 0); when(task.getElapsedTime()).thenReturn(clock.getTime()); + when(task.isQueryGroupSet()).thenReturn(true); AtomicBoolean isCancelled = new AtomicBoolean(false); doAnswer(invocation -> { From 1ca338d2e63a0823e2123f3e519575e5e11cbcae Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Tue, 28 Jan 2025 13:48:22 -0800 Subject: [PATCH 25/48] Remove more deprecated master classes (#17166) Signed-off-by: Andrew Ross --- 
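For downstream code, the practical effect of this patch is a straight rename at call sites, as the RemoteRestoreSnapshotIT hunk below shows for snapshot creation. A hedged migration sketch follows; the repository and snapshot names are illustrative, and the import locations assume the 2.x package layout.

    import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
    import org.opensearch.client.Client;
    import org.opensearch.common.unit.TimeValue;

    final class SnapshotTimeoutMigration {
        // Illustrative helper, not part of the patch: shows the renamed builder method.
        static CreateSnapshotResponse createSnapshot(Client client) {
            return client.admin()
                .cluster()
                .prepareCreateSnapshot("my-repo", "my-snapshot") // illustrative names
                .setWaitForCompletion(true)
                // previously: .setMasterNodeTimeout(TimeValue.timeValueSeconds(60))
                .setClusterManagerNodeTimeout(TimeValue.timeValueSeconds(60))
                .get();
        }
    }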
.../remotestore/RemoteRestoreSnapshotIT.java | 2 +- ...terManagerNodeOperationRequestBuilder.java | 22 ------ .../cluster/LocalNodeMasterListener.java | 76 ------------------- .../cluster/MasterNodeChangePredicate.java | 53 ------------- .../cluster/NotMasterException.java | 57 -------------- .../coordination/NoMasterBlockService.java | 50 ------------ .../UnsafeBootstrapMasterCommand.java | 47 ------------ .../service/ClusterApplierService.java | 10 --- .../settings/ConsistentSettingsService.java | 12 +-- .../MasterNotDiscoveredException.java | 63 --------------- .../rest/action/cat/RestMasterAction.java | 44 ----------- .../ExceptionSerializationTests.java | 5 -- .../RenamedTimeoutRequestParameterTests.java | 3 +- ...ansportClusterManagerNodeActionUtils.java} | 2 +- .../service/ClusterApplierServiceTests.java | 7 +- .../ConsistentSettingsServiceTests.java | 16 ++-- .../indices/cluster/ClusterStateChanges.java | 4 +- 17 files changed, 22 insertions(+), 451 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java delete mode 100644 server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java delete mode 100644 server/src/main/java/org/opensearch/cluster/NotMasterException.java delete mode 100644 server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java delete mode 100644 server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java delete mode 100644 server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java delete mode 100644 server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java rename server/src/test/java/org/opensearch/action/support/clustermanager/{TransportMasterNodeActionUtils.java => TransportClusterManagerNodeActionUtils.java} (97%) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 3b96636cfe771..bc6ee35907220 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -1143,7 +1143,7 @@ public void testConcurrentSnapshotV2CreateOperation_MasterChange() throws Except .cluster() .prepareCreateSnapshot(snapshotRepoName, snapshotName) .setWaitForCompletion(true) - .setMasterNodeTimeout(TimeValue.timeValueSeconds(60)) + .setClusterManagerNodeTimeout(TimeValue.timeValueSeconds(60)) .get(); } catch (Exception ignored) {} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java index 05c06604725a1..03fc76c85e0e2 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java @@ -63,17 +63,6 @@ public final RequestBuilder setClusterManagerNodeTimeout(TimeValue timeout) { return (RequestBuilder) this; } - /** - * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. 
- * - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} - */ - @SuppressWarnings("unchecked") - @Deprecated - public final RequestBuilder setMasterNodeTimeout(TimeValue timeout) { - return setClusterManagerNodeTimeout(timeout); - } - /** * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. */ @@ -82,15 +71,4 @@ public final RequestBuilder setClusterManagerNodeTimeout(String timeout) { request.clusterManagerNodeTimeout(timeout); return (RequestBuilder) this; } - - /** - * Sets the cluster-manager node timeout in case the cluster-manager has not yet been discovered. - * - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} - */ - @SuppressWarnings("unchecked") - @Deprecated - public final RequestBuilder setMasterNodeTimeout(String timeout) { - return setClusterManagerNodeTimeout(timeout); - } } diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java deleted file mode 100644 index 2487aaf0d7c51..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/LocalNodeMasterListener.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cluster; - -import org.opensearch.common.annotation.DeprecatedApi; - -/** - * Enables listening to cluster-manager changes events of the local node (when the local node becomes the cluster-manager, and when the local - * node cease being a cluster-manager). - * - * @opensearch.api - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link LocalNodeClusterManagerListener} - */ -@Deprecated -@DeprecatedApi(since = "2.2.0") -public interface LocalNodeMasterListener extends LocalNodeClusterManagerListener { - - /** - * Called when local node is elected to be the cluster-manager. - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #onClusterManager()} - */ - @Deprecated - void onMaster(); - - /** - * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. 
- * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #offClusterManager()} - */ - @Deprecated - void offMaster(); - - /** - * Called when local node is elected to be the cluster-manager. - */ - @Override - default void onClusterManager() { - onMaster(); - } - - /** - * Called when the local node used to be the cluster-manager, a new cluster-manager was elected and it's no longer the local node. - */ - @Override - default void offClusterManager() { - offMaster(); - } -} diff --git a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java b/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java deleted file mode 100644 index d06aa219e3ca6..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/MasterNodeChangePredicate.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cluster; - -import java.util.function.Predicate; - -/** - * Utility class to build a predicate that accepts cluster state changes - * - * @opensearch.internal - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerNodeChangePredicate} - */ -@Deprecated -public final class MasterNodeChangePredicate { - - private MasterNodeChangePredicate() { - - } - - public static Predicate build(ClusterState currentState) { - return ClusterManagerNodeChangePredicate.build(currentState); - } -} diff --git a/server/src/main/java/org/opensearch/cluster/NotMasterException.java b/server/src/main/java/org/opensearch/cluster/NotMasterException.java deleted file mode 100644 index 8cdd0f8332212..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/NotMasterException.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cluster; - -import org.opensearch.core.common.io.stream.StreamInput; - -import java.io.IOException; - -/** - * Thrown when a node join request or a cluster-manager ping reaches a node which is not - * currently acting as a cluster-manager or when a cluster state update task is to be executed - * on a node that is no longer cluster-manager. - * - * @opensearch.internal - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link NotClusterManagerException} - */ -@Deprecated -public class NotMasterException extends NotClusterManagerException { - - public NotMasterException(String msg) { - super(msg); - } - - public NotMasterException(StreamInput in) throws IOException { - super(in); - } - -} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java deleted file mode 100644 index 1dff7b2a8f0d6..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoMasterBlockService.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.cluster.coordination; - -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Settings; - -/** - * Service to block the master node - * - * @opensearch.internal - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link NoClusterManagerBlockService} - */ -@Deprecated -public class NoMasterBlockService extends NoClusterManagerBlockService { - - public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) { - super(settings, clusterSettings); - } - -} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java deleted file mode 100644 index 6014dc0b44ab8..0000000000000 --- a/server/src/main/java/org/opensearch/cluster/coordination/UnsafeBootstrapMasterCommand.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.cluster.coordination; - -/** - * Tool to run an unsafe bootstrap - * - * @opensearch.internal - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link UnsafeBootstrapClusterManagerCommand} - */ -@Deprecated -public class UnsafeBootstrapMasterCommand extends UnsafeBootstrapClusterManagerCommand { - - UnsafeBootstrapMasterCommand() { - super(); - } - -} diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 6489f3cb33ce0..7ab1a082a4620 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -43,7 +43,6 @@ import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.LocalNodeClusterManagerListener; -import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.TimeoutClusterStateListener; import org.opensearch.cluster.metadata.ProcessClusterEventTimeoutException; @@ -308,15 +307,6 @@ public void addLocalNodeClusterManagerListener(LocalNodeClusterManagerListener l addListener(listener); } - /** - * Add a listener for on/off local node cluster-manager events - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #addLocalNodeClusterManagerListener} - */ - @Deprecated - public void addLocalNodeMasterListener(LocalNodeMasterListener listener) { - addLocalNodeClusterManagerListener(listener); - } - /** * Adds a cluster state listener that is expected to be removed during a short period of time. * If provided, the listener will be notified once a specific time has elapsed. diff --git a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java index ecd38810e8636..a7b99d33a8d3a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java +++ b/server/src/main/java/org/opensearch/common/settings/ConsistentSettingsService.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; -import org.opensearch.cluster.LocalNodeMasterListener; +import org.opensearch.cluster.LocalNodeClusterManagerListener; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; @@ -89,10 +89,10 @@ public ConsistentSettingsService(Settings settings, ClusterService clusterServic } /** - * Returns a {@link LocalNodeMasterListener} that will publish hashes of all the settings passed in the constructor. These hashes are + * Returns a {@link LocalNodeClusterManagerListener} that will publish hashes of all the settings passed in the constructor. These hashes are * published by the cluster-manager node only. Note that this is not designed for {@link SecureSettings} implementations that are mutable. 
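     * <p>A minimal usage sketch, assuming a registration call site (registration itself is
     * outside this diff): hand the publisher to the cluster service so the hashes are pushed
     * when this node is elected cluster-manager.
     * <pre>
     * clusterService.addLocalNodeClusterManagerListener(consistentSettingsService.newHashPublisher());
     * </pre>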
*/ - public LocalNodeMasterListener newHashPublisher() { + public LocalNodeClusterManagerListener newHashPublisher() { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings = computeHashesOfConsistentSecureSettings(); return new HashesPublisher(computedHashesOfConsistentSettings, clusterService); @@ -247,7 +247,7 @@ private byte[] computeSaltedPBKDF2Hash(byte[] bytes, byte[] salt) { } } - static final class HashesPublisher implements LocalNodeMasterListener { + static final class HashesPublisher implements LocalNodeClusterManagerListener { // eagerly compute hashes to be published final Map computedHashesOfConsistentSettings; @@ -259,7 +259,7 @@ static final class HashesPublisher implements LocalNodeMasterListener { } @Override - public void onMaster() { + public void onClusterManager() { clusterService.submitStateUpdateTask("publish-secure-settings-hashes", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -285,7 +285,7 @@ public void onFailure(String source, Exception e) { } @Override - public void offMaster() { + public void offClusterManager() { logger.trace("I am no longer master, nothing to do"); } } diff --git a/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java b/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java deleted file mode 100644 index 3915ae5ead999..0000000000000 --- a/server/src/main/java/org/opensearch/discovery/MasterNotDiscoveredException.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.discovery; - -import org.opensearch.core.common.io.stream.StreamInput; - -import java.io.IOException; - -/** - * Exception when the cluster-manager is not discovered - * - * @opensearch.internal - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link ClusterManagerNotDiscoveredException} - */ -@Deprecated -public class MasterNotDiscoveredException extends ClusterManagerNotDiscoveredException { - - public MasterNotDiscoveredException() { - super(); - } - - public MasterNotDiscoveredException(Throwable cause) { - super(cause); - } - - public MasterNotDiscoveredException(String message) { - super(message); - } - - public MasterNotDiscoveredException(StreamInput in) throws IOException { - super(in); - } -} diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java deleted file mode 100644 index 20f7b01ef2b42..0000000000000 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestMasterAction.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.rest.action.cat; - -/** - * _cat API action to list cluster_manager information - * - * @opensearch.api - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link RestClusterManagerAction} - */ -@Deprecated -public class RestMasterAction extends RestClusterManagerAction { - -} diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index c1972daeab6d3..590f85dc15269 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -46,7 +46,6 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.client.AbstractClientHeadersTestCase; -import org.opensearch.cluster.NotMasterException; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.IndexCreateBlockException; @@ -88,7 +87,6 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentLocation; import org.opensearch.crypto.CryptoRegistryException; -import org.opensearch.discovery.MasterNotDiscoveredException; import org.opensearch.env.ShardLockObtainFailedException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.query.QueryShardException; @@ -259,9 +257,6 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOEx Files.walkFileTree(testStartPath, visitor); assertTrue(notRegistered.remove(TestException.class)); assertTrue(notRegistered.remove(UnknownHeaderException.class)); - // Remove the deprecated exception classes from the unregistered list. 
- assertTrue(notRegistered.remove(NotMasterException.class)); - assertTrue(notRegistered.remove(MasterNotDiscoveredException.class)); assertTrue( "Classes subclassing OpenSearchException must be registered in OpenSearchException.OpenSearchExceptionHandle \n" + notRegistered, diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 26c4670b4288c..a7a45dc93e1cd 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -70,7 +70,6 @@ import org.opensearch.rest.action.cat.RestAllocationAction; import org.opensearch.rest.action.cat.RestClusterManagerAction; import org.opensearch.rest.action.cat.RestIndicesAction; -import org.opensearch.rest.action.cat.RestMasterAction; import org.opensearch.rest.action.cat.RestNodeAttrsAction; import org.opensearch.rest.action.cat.RestNodesAction; import org.opensearch.rest.action.cat.RestPendingClusterTasksAction; @@ -174,7 +173,7 @@ public void testCatClusterManager() { } public void testCatMaster() { - RestMasterAction action = new RestMasterAction(); + RestClusterManagerAction action = new RestClusterManagerAction(); Exception e = assertThrows(OpenSearchParseException.class, () -> action.doCatRequest(getRestRequestWithBothParams(), client)); assertThat(e.getMessage(), containsString(DUPLICATE_PARAMETER_ERROR_MESSAGE)); assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionUtils.java similarity index 97% rename from server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionUtils.java index ce44c4d2c5b48..f997e27639898 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionUtils.java @@ -36,7 +36,7 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.action.ActionResponse; -public class TransportMasterNodeActionUtils { +public class TransportClusterManagerNodeActionUtils { /** * Allows to directly call {@link TransportClusterManagerNodeAction#clusterManagerOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} which is diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index be6057a391b2e..b97656304c46f 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -40,7 +40,6 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.LocalNodeClusterManagerListener; -import org.opensearch.cluster.LocalNodeMasterListener; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.Metadata; @@ -386,14 +385,14 @@ 
public void testDeprecatedLocalNodeMasterListenerCallbacks() { TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false, Optional.empty()); AtomicBoolean isClusterManager = new AtomicBoolean(); - timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { + timedClusterApplierService.addLocalNodeClusterManagerListener(new LocalNodeClusterManagerListener() { @Override - public void onMaster() { + public void onClusterManager() { isClusterManager.set(true); } @Override - public void offMaster() { + public void offClusterManager() { isClusterManager.set(false); } }); diff --git a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java index 86bc124007829..1f63a3724f258 100644 --- a/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java +++ b/server/src/test/java/org/opensearch/common/settings/ConsistentSettingsServiceTests.java @@ -76,7 +76,7 @@ public void testSingleStringSetting() throws Exception { // hashes not yet published assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); ConsistentSettingsService consistentService = new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)); assertThat(consistentService.areAllConsistent(), is(true)); // change value @@ -84,7 +84,7 @@ public void testSingleStringSetting() throws Exception { assertThat(consistentService.areAllConsistent(), is(false)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); // publish change - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); assertThat(consistentService.areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); } @@ -109,7 +109,7 @@ public void testSingleAffixSetting() throws Exception { is(false) ); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); ConsistentSettingsService consistentService = new ConsistentSettingsService( settings, clusterService, @@ -124,7 +124,7 @@ public void testSingleAffixSetting() throws Exception { is(false) ); // publish change - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(consistentService.areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); // add value @@ -137,7 +137,7 @@ public void testSingleAffixSetting() 
throws Exception { is(false) ); // publish - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); // remove value secureSettings = new MockSecureSettings(); @@ -174,7 +174,7 @@ public void testStringAndAffixSettings() throws Exception { is(false) ); // publish only the simple string setting - new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); assertThat( new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), @@ -185,7 +185,7 @@ public void testStringAndAffixSettings() throws Exception { is(false) ); // publish only the affix string setting - new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onMaster(); + new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).newHashPublisher().onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(false)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); assertThat( @@ -194,7 +194,7 @@ public void testStringAndAffixSettings() throws Exception { ); // publish both settings new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting, affixStringSetting)).newHashPublisher() - .onMaster(); + .onClusterManager(); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(stringSetting)).areAllConsistent(), is(true)); assertThat(new ConsistentSettingsService(settings, clusterService, Arrays.asList(affixStringSetting)).areAllConsistent(), is(true)); assertThat( diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index b11a583de4eee..e271a0bc8ffa3 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -56,7 +56,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; -import org.opensearch.action.support.clustermanager.TransportMasterNodeActionUtils; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeActionUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; @@ -513,7 +513,7 @@ private , Response extends Ac ) { return executeClusterStateUpdateTask(clusterState, () -> { try { - TransportMasterNodeActionUtils.runClusterManagerOperation( + TransportClusterManagerNodeActionUtils.runClusterManagerOperation( 
masterNodeAction, request, clusterState, From 3c55a311a3305f64bbfc9548569bd36469b867a7 Mon Sep 17 00:00:00 2001 From: Prudhvi Godithi Date: Tue, 28 Jan 2025 17:55:45 -0800 Subject: [PATCH 26/48] Fix failing CI's (#17172) --- CHANGELOG.md | 1 + gradle/formatting.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0200f7640bc1b..86295cb34f8de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -119,6 +119,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) - Fix AutoDateHistogramAggregator rounding assertion failure ([#17023](https://github.com/opensearch-project/OpenSearch/pull/17023)) +- Fix the failing CI's with `Failed to load eclipse jdt formatter` error ([#17172](https://github.com/opensearch-project/OpenSearch/pull/17172)) ### Security diff --git a/gradle/formatting.gradle b/gradle/formatting.gradle index f3a4bf5cc765b..45d63fd43e875 100644 --- a/gradle/formatting.gradle +++ b/gradle/formatting.gradle @@ -82,7 +82,7 @@ allprojects { '\\#java|\\#org.opensearch|\\#org.hamcrest|\\#' ) - eclipse().configFile rootProject.file('buildSrc/formatterConfig.xml') + eclipse().withP2Mirrors(Map.of("https://download.eclipse.org/", "https://mirror.umd.edu/eclipse/")).configFile rootProject.file('buildSrc/formatterConfig.xml') trimTrailingWhitespace() endWithNewline() From b9ddef9c9749e02deeccf95946b60b2303bbfd9c Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:13:06 -0800 Subject: [PATCH 27/48] Extensible design to add new query and field type support for Star Tree (#17137) --------- Signed-off-by: expani --- CHANGELOG.md | 1 + .../node/FixedLengthStarTreeNode.java | 123 ++++- .../datacube/startree/node/StarTreeNode.java | 23 +- .../startree/utils/StarTreeQueryHelper.java | 306 ------------ .../SortedNumericStarTreeValuesIterator.java | 5 + .../SortedSetStarTreeValuesIterator.java | 12 + .../iterator/StarTreeValuesIterator.java | 5 + .../index/mapper/NumberFieldMapper.java | 2 +- .../index/query/QueryShardContext.java | 11 + .../org/opensearch/search/SearchService.java | 16 +- .../histogram/DateHistogramAggregator.java | 25 +- .../aggregations/metrics/AvgAggregator.java | 8 +- .../aggregations/metrics/MaxAggregator.java | 26 +- .../aggregations/metrics/MinAggregator.java | 26 +- .../aggregations/metrics/SumAggregator.java | 13 +- .../metrics/ValueCountAggregator.java | 13 +- .../search/internal/SearchContext.java | 11 - .../startree/StarTreeNodeCollector.java | 25 + .../search/startree/StarTreeQueryContext.java | 211 ++++++-- .../search/startree/StarTreeQueryHelper.java | 203 ++++++++ ...Filter.java => StarTreeTraversalUtil.java} | 77 +-- .../startree/filter/DimensionFilter.java | 89 ++++ .../startree/filter/ExactMatchDimFilter.java | 84 ++++ .../startree/filter/MatchNoneFilter.java | 36 ++ .../startree/filter/RangeMatchDimFilter.java | 89 ++++ .../startree/filter/StarTreeFilter.java | 42 ++ .../search/startree/filter/package-info.java | 10 + .../provider/DimensionFilterMapper.java | 410 ++++++++++++++++ .../provider/StarTreeFilterProvider.java | 159 ++++++ .../filter/provider/package-info.java | 10 + .../FixedLengthStarTreeNodeSearchTests.java | 338 +++++++++++++ 
.../search/SearchServiceStarTreeTests.java | 359 ++++++++++++-- .../startree/ArrayBasedCollector.java | 42 ++ .../DateHistogramAggregatorTests.java | 21 +- .../DimensionFilterAndMapperTests.java | 193 ++++++++ .../startree/MetricAggregatorTests.java | 463 ++++++++++++------ .../startree/StarTreeFilterTests.java | 236 +++++++-- .../aggregations/AggregatorTestCase.java | 33 +- 38 files changed, 3035 insertions(+), 721 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java create mode 100644 server/src/main/java/org/opensearch/search/startree/StarTreeNodeCollector.java create mode 100644 server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java rename server/src/main/java/org/opensearch/search/startree/{StarTreeFilter.java => StarTreeTraversalUtil.java} (76%) create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/DimensionFilter.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/MatchNoneFilter.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/StarTreeFilter.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/package-info.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/provider/StarTreeFilterProvider.java create mode 100644 server/src/main/java/org/opensearch/search/startree/filter/provider/package-info.java create mode 100644 server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/ArrayBasedCollector.java create mode 100644 server/src/test/java/org/opensearch/search/aggregations/startree/DimensionFilterAndMapperTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 86295cb34f8de..f386b092cf074 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce Template query ([#16818](https://github.com/opensearch-project/OpenSearch/pull/16818)) - Propagate the sourceIncludes and excludes fields from fetchSourceContext to FieldsVisitor. 
([#17080](https://github.com/opensearch-project/OpenSearch/pull/17080)) - [Star Tree] [Search] Resolving Date histogram with metric aggregation using star-tree ([#16674](https://github.com/opensearch-project/OpenSearch/pull/16674)) +- [Star Tree] [Search] Extensible design to support different query and field types ([#17137](https://github.com/opensearch-project/OpenSearch/pull/17137)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java index df2ce9096bfc1..c6c4993290c16 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNode.java @@ -10,6 +10,7 @@ import org.apache.lucene.store.RandomAccessInput; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; +import org.opensearch.search.startree.StarTreeNodeCollector; import java.io.IOException; import java.io.UncheckedIOException; @@ -192,7 +193,7 @@ public StarTreeNode getChildStarNode() throws IOException { } @Override - public StarTreeNode getChildForDimensionValue(Long dimensionValue) throws IOException { + public StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode lastMatchedChild) throws IOException { // there will be no children for leaf nodes if (isLeaf()) { return null; @@ -200,7 +201,7 @@ public StarTreeNode getChildForDimensionValue(Long dimensionValue) throws IOExce StarTreeNode resultStarTreeNode = null; if (null != dimensionValue) { - resultStarTreeNode = binarySearchChild(dimensionValue); + resultStarTreeNode = binarySearchChild(dimensionValue, lastMatchedChild); } return resultStarTreeNode; } @@ -240,21 +241,29 @@ private static FixedLengthStarTreeNode matchStarTreeNodeTypeOrNull(FixedLengthSt * @return The child node if found, null otherwise * @throws IOException If there's an error reading from the input */ - private FixedLengthStarTreeNode binarySearchChild(long dimensionValue) throws IOException { + private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, StarTreeNode lastMatchedNode) throws IOException { int low = firstChildId; - // if the current node is star node, increment the low to reduce the search space - if (matchStarTreeNodeTypeOrNull(new FixedLengthStarTreeNode(in, firstChildId), StarTreeNodeType.STAR) != null) { - low++; - } - int high = getInt(LAST_CHILD_ID_OFFSET); // if the current node is null node, decrement the high to reduce the search space if (matchStarTreeNodeTypeOrNull(new FixedLengthStarTreeNode(in, high), StarTreeNodeType.NULL) != null) { high--; } + if (lastMatchedNode instanceof FixedLengthStarTreeNode) { + int lastMatchedNodeId = ((FixedLengthStarTreeNode) lastMatchedNode).nodeId(); + // Start the binary search from node after the last matched as low. 
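+            // Assumption about intent: callers probe dimension values in ascending order
+            // (e.g. sorted ordinals from an exact-match filter), so resuming the binary
+            // search just past lastMatchedNode only ever shrinks the remaining range.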
+ if ((lastMatchedNodeId + 1) <= high) { + low = lastMatchedNodeId + 1; + } else { + return null; + } + } else if (matchStarTreeNodeTypeOrNull(new FixedLengthStarTreeNode(in, low), StarTreeNodeType.STAR) != null) { + // if the current node is star node, increment the low to reduce the search space + low++; + } + while (low <= high) { int mid = low + (high - low) / 2; FixedLengthStarTreeNode midNode = new FixedLengthStarTreeNode(in, mid); @@ -271,6 +280,100 @@ private FixedLengthStarTreeNode binarySearchChild(long dimensionValue) throws IO return null; } + @Override + public void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector) throws IOException { + if (low <= high) { + FixedLengthStarTreeNode lowStarTreeNode = binarySearchChild(low, true, null); + if (lowStarTreeNode != null) { + FixedLengthStarTreeNode highStarTreeNode = binarySearchChild(high, false, lowStarTreeNode); + if (highStarTreeNode != null) { + for (int lowNodeId = lowStarTreeNode.nodeId(); lowNodeId <= highStarTreeNode.nodeId(); ++lowNodeId) { + collector.collectStarTreeNode(new FixedLengthStarTreeNode(in, lowNodeId)); + } + } else if (lowStarTreeNode.getDimensionValue() <= high) { // Low StarTreeNode is the last default node for that dimension. + collector.collectStarTreeNode(lowStarTreeNode); + } + } + } + } + + /** + * + * @param dimensionValue : The dimension to match. + * @param matchNextHighest : If true then we try to return @dimensionValue or the next Highest. Else, we return @dimensionValue or the next Lowest. + * @param lastMatchedNode : If not null, we begin the binary search from the node after this. + * @return : Matched node or null. + * @throws IOException : + */ + private FixedLengthStarTreeNode binarySearchChild(long dimensionValue, boolean matchNextHighest, StarTreeNode lastMatchedNode) + throws IOException { + + int low = firstChildId; + int tempLow = low; + int starNodeId, nullNodeId; + starNodeId = nullNodeId = Integer.MIN_VALUE; + + // if the current node is star node, increment the tempLow to reduce the search space + if (matchStarTreeNodeTypeOrNull(new FixedLengthStarTreeNode(in, tempLow), StarTreeNodeType.STAR) != null) { + starNodeId = tempLow; + tempLow++; + } + + int high = getInt(LAST_CHILD_ID_OFFSET); + int tempHigh = high; + // if the current node is null node, decrement the tempHigh to reduce the search space + if (matchStarTreeNodeTypeOrNull(new FixedLengthStarTreeNode(in, tempHigh), StarTreeNodeType.NULL) != null) { + nullNodeId = tempHigh; + tempHigh--; + } + + if (lastMatchedNode instanceof FixedLengthStarTreeNode) { + int lastMatchedNodeId = ((FixedLengthStarTreeNode) lastMatchedNode).nodeId(); + // Start the binary search from node after the last matched as low. + if ((lastMatchedNodeId + 1) <= tempHigh) { + tempLow = lastMatchedNodeId + 1; + } else { + return null; + } + } + + while (tempLow <= tempHigh) { + int mid = tempLow + (tempHigh - tempLow) / 2; + FixedLengthStarTreeNode midNode = new FixedLengthStarTreeNode(in, mid); + long midDimensionValue = midNode.getDimensionValue(); + + if (midDimensionValue == dimensionValue) { + return midNode; + } else { + if (midDimensionValue < dimensionValue) { // Going to the right from mid to search next + tempLow = mid + 1; + // We are going out of bounds for this dimension on the right side. + if (tempLow > high || tempLow == nullNodeId) { + return matchNextHighest ? 
null : midNode; + } else { + FixedLengthStarTreeNode nodeGreaterThanMid = new FixedLengthStarTreeNode(in, tempLow); + if (nodeGreaterThanMid.getDimensionValue() > dimensionValue) { + return matchNextHighest ? nodeGreaterThanMid : midNode; + } + } + } else { // Going to the left from mid to search next + tempHigh = mid - 1; + // We are going out of bounds for this dimension on the left side. + if (tempHigh < low || tempHigh == starNodeId) { + return matchNextHighest ? midNode : null; + } else { + FixedLengthStarTreeNode nodeLessThanMid = new FixedLengthStarTreeNode(in, tempHigh); + if (nodeLessThanMid.getDimensionValue() < dimensionValue) { + return matchNextHighest ? midNode : nodeLessThanMid; + } + } + } + } + } + return null; + + } + @Override public Iterator getChildrenIterator() throws IOException { return new Iterator<>() { @@ -297,4 +400,8 @@ public void remove() { } }; } + + public int nodeId() { + return nodeId; + } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java index 3767f6850002a..40161a942ae4b 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/node/StarTreeNode.java @@ -9,6 +9,7 @@ package org.opensearch.index.compositeindex.datacube.startree.node; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.search.startree.StarTreeNodeCollector; import java.io.IOException; import java.util.Iterator; @@ -107,7 +108,27 @@ public interface StarTreeNode { * @return the child node for the given dimension value or null if child is not present * @throws IOException if an I/O error occurs while retrieving the child node */ - StarTreeNode getChildForDimensionValue(Long dimensionValue) throws IOException; + default StarTreeNode getChildForDimensionValue(Long dimensionValue) throws IOException { + return getChildForDimensionValue(dimensionValue, null); + } + + /** + * Matches the given @dimensionValue amongst the child default nodes for this node. + * @param dimensionValue : Value to match + * @param lastMatchedChild : If not null, binary search will use this as the start/low + * @return : Matched StarTreeNode or null if not found + * @throws IOException : Any exception in reading the node data from index. + */ + StarTreeNode getChildForDimensionValue(Long dimensionValue, StarTreeNode lastMatchedChild) throws IOException; + + /** + * Collects all matching child nodes whose dimension values lie within the range of low and high, both inclusive. + * @param low : Starting of the range ( inclusive ) + * @param high : End of the range ( inclusive ) + * @param collector : Collector to collect the matched child StarTreeNode's + * @throws IOException : Any exception in reading the node data from index. + */ + void collectChildrenInRange(long low, long high, StarTreeNodeCollector collector) throws IOException; /** * Returns the child star node for a node in the star-tree. 
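The two traversal entry points added to StarTreeNode above compose as in the following minimal
sketch. It assumes StarTreeNodeCollector exposes the single collectStarTreeNode(StarTreeNode)
callback that FixedLengthStarTreeNode invokes; sortedOrds, parent, low, high, and collect(...)
are hypothetical stand-ins for a caller such as a dimension filter.

    // Exact matches: probe ascending ordinals, resuming each search past the last hit.
    StarTreeNode last = null;
    for (long ord : sortedOrds) {
        StarTreeNode child = parent.getChildForDimensionValue(ord, last);
        if (child != null) {
            collect(child);
            last = child; // the next binary search starts after this node
        }
    }

    // Range matches: collect every child whose dimension value lies in [low, high].
    parent.collectChildrenInRange(low, high, child -> collect(child));

Keeping last untouched when a probe misses preserves the search floor for the next probe,
which matches the null-return contract of the resumable binary search above.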
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java deleted file mode 100644 index e2414d9f6a8a1..0000000000000 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ /dev/null @@ -1,306 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.compositeindex.datacube.startree.utils; - -import org.apache.lucene.index.DocValuesType; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.util.FixedBitSet; -import org.opensearch.common.lucene.Lucene; -import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; -import org.opensearch.index.codec.composite.CompositeIndexReader; -import org.opensearch.index.compositeindex.datacube.DateDimension; -import org.opensearch.index.compositeindex.datacube.Dimension; -import org.opensearch.index.compositeindex.datacube.Metric; -import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; -import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; -import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; -import org.opensearch.index.mapper.CompositeDataCubeFieldType; -import org.opensearch.index.query.MatchAllQueryBuilder; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.search.aggregations.AggregatorFactory; -import org.opensearch.search.aggregations.LeafBucketCollector; -import org.opensearch.search.aggregations.StarTreeBucketCollector; -import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory; -import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; -import org.opensearch.search.aggregations.support.ValuesSource; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.startree.StarTreeFilter; -import org.opensearch.search.startree.StarTreeQueryContext; - -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.function.BiConsumer; -import java.util.function.Consumer; -import java.util.stream.Collectors; - -/** - * Helper class for building star-tree query - * - * @opensearch.internal - * @opensearch.experimental - */ -public class StarTreeQueryHelper { - - /** - * Checks if the search context can be supported by star-tree - */ - public static boolean isStarTreeSupported(SearchContext context) { - return context.aggregations() != null && context.mapperService().isCompositeIndexPresent() && context.parsedPostFilter() == null; - } - - /** - * Gets StarTreeQueryContext from the search context and source builder. - * Returns null if the query and aggregation cannot be supported. 
- */ - public static StarTreeQueryContext getStarTreeQueryContext(SearchContext context, SearchSourceBuilder source) throws IOException { - // Current implementation assumes only single star-tree is supported - CompositeDataCubeFieldType compositeMappedFieldType = (CompositeDataCubeFieldType) context.mapperService() - .getCompositeFieldTypes() - .iterator() - .next(); - CompositeIndexFieldInfo starTree = new CompositeIndexFieldInfo( - compositeMappedFieldType.name(), - compositeMappedFieldType.getCompositeIndexType() - ); - - for (AggregatorFactory aggregatorFactory : context.aggregations().factories().getFactories()) { - // first check for aggregation is a metric aggregation - if (validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory)) { - continue; - } - - // if not a metric aggregation, check for applicable date histogram shape - if (validateDateHistogramSupport(compositeMappedFieldType, aggregatorFactory)) { - continue; - } - return null; - } - - // need to cache star tree values only for multiple aggregations - boolean cacheStarTreeValues = context.aggregations().factories().getFactories().length > 1; - int cacheSize = cacheStarTreeValues ? context.indexShard().segments(false).size() : -1; - - return StarTreeQueryHelper.tryCreateStarTreeQueryContext(starTree, compositeMappedFieldType, source.query(), cacheSize); - } - - /** - * Uses query builder and composite index info to form star-tree query context - */ - private static StarTreeQueryContext tryCreateStarTreeQueryContext( - CompositeIndexFieldInfo compositeIndexFieldInfo, - CompositeDataCubeFieldType compositeFieldType, - QueryBuilder queryBuilder, - int cacheStarTreeValuesSize - ) { - Map queryMap; - if (queryBuilder == null || queryBuilder instanceof MatchAllQueryBuilder) { - queryMap = null; - } else if (queryBuilder instanceof TermQueryBuilder termQueryBuilder) { - // TODO: Add support for keyword fields - Dimension matchedDimension = compositeFieldType.getDimensions() - .stream() - .filter(d -> (d.getField().equals(termQueryBuilder.fieldName()) && d.getDocValuesType() == DocValuesType.SORTED_NUMERIC)) - .findFirst() - .orElse(null); - if (matchedDimension == null) { - return null; - } - queryMap = Map.of(termQueryBuilder.fieldName(), Long.parseLong(termQueryBuilder.value().toString())); - } else { - return null; - } - return new StarTreeQueryContext(compositeIndexFieldInfo, queryMap, cacheStarTreeValuesSize); - } - - private static boolean validateStarTreeMetricSupport( - CompositeDataCubeFieldType compositeIndexFieldInfo, - AggregatorFactory aggregatorFactory - ) { - if (aggregatorFactory instanceof MetricAggregatorFactory metricAggregatorFactory - && metricAggregatorFactory.getSubFactories().getFactories().length == 0) { - String field; - Map> supportedMetrics = compositeIndexFieldInfo.getMetrics() - .stream() - .collect(Collectors.toMap(Metric::getField, Metric::getMetrics)); - - MetricStat metricStat = metricAggregatorFactory.getMetricStat(); - field = metricAggregatorFactory.getField(); - - return supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat); - } - return false; - } - - private static boolean validateDateHistogramSupport( - CompositeDataCubeFieldType compositeIndexFieldInfo, - AggregatorFactory aggregatorFactory - ) { - if (!(aggregatorFactory instanceof DateHistogramAggregatorFactory dateHistogramAggregatorFactory) - || aggregatorFactory.getSubFactories().getFactories().length < 1) { - return false; - } - - // Find the DateDimension in the dimensions list - 
DateDimension starTreeDateDimension = null; - for (Dimension dimension : compositeIndexFieldInfo.getDimensions()) { - if (dimension instanceof DateDimension) { - starTreeDateDimension = (DateDimension) dimension; - break; - } - } - - // If no DateDimension is found, validation fails - if (starTreeDateDimension == null) { - return false; - } - - // Ensure the rounding is not null - if (dateHistogramAggregatorFactory.getRounding() == null) { - return false; - } - - // Find the closest valid interval in the DateTimeUnitRounding class associated with star tree - DateTimeUnitRounding rounding = starTreeDateDimension.findClosestValidInterval( - new DateTimeUnitAdapter(dateHistogramAggregatorFactory.getRounding()) - ); - if (rounding == null) { - return false; - } - - // Validate all sub-factories - for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { - if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { - return false; - } - } - return true; - } - - public static CompositeIndexFieldInfo getSupportedStarTree(SearchContext context) { - StarTreeQueryContext starTreeQueryContext = context.getStarTreeQueryContext(); - return (starTreeQueryContext != null) ? starTreeQueryContext.getStarTree() : null; - } - - public static StarTreeValues getStarTreeValues(LeafReaderContext context, CompositeIndexFieldInfo starTree) throws IOException { - SegmentReader reader = Lucene.segmentReader(context.reader()); - if (!(reader.getDocValuesReader() instanceof CompositeIndexReader)) { - return null; - } - CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); - return (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(starTree); - } - - /** - * Get the star-tree leaf collector - * This collector computes the aggregation prematurely and invokes an early termination collector - */ - public static LeafBucketCollector getStarTreeLeafCollector( - SearchContext context, - ValuesSource.Numeric valuesSource, - LeafReaderContext ctx, - LeafBucketCollector sub, - CompositeIndexFieldInfo starTree, - String metric, - Consumer valueConsumer, - Runnable finalConsumer - ) throws IOException { - StarTreeValues starTreeValues = getStarTreeValues(ctx, starTree); - assert starTreeValues != null; - String fieldName = ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(); - String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues(starTree.getField(), fieldName, metric); - - assert starTreeValues != null; - SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator( - metricName - ); - // Obtain a FixedBitSet of matched star tree document IDs - FixedBitSet filteredValues = getStarTreeFilteredValues(context, ctx, starTreeValues); - assert filteredValues != null; - - int numBits = filteredValues.length(); // Get the number of the filtered values (matching docs) - if (numBits > 0) { - // Iterate over the filtered values - for (int bit = filteredValues.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) - ? 
filteredValues.nextSetBit(bit + 1) - : DocIdSetIterator.NO_MORE_DOCS) { - // Advance to the entryId in the valuesIterator - if (valuesIterator.advanceExact(bit) == false) { - continue; // Skip if no more entries - } - - // Iterate over the values for the current entryId - for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { - long value = valuesIterator.nextValue(); - valueConsumer.accept(value); // Apply the consumer operation (e.g., max, sum) - } - } - } - - // Call the final consumer after processing all entries - finalConsumer.run(); - - // Terminate after pre-computing aggregation - throw new CollectionTerminatedException(); - } - - public static StarTreeBucketCollector getStarTreeBucketMetricCollector( - CompositeIndexFieldInfo starTree, - String metric, - ValuesSource.Numeric valuesSource, - StarTreeBucketCollector parentCollector, - Consumer growArrays, - BiConsumer updateBucket - ) throws IOException { - assert parentCollector != null; - return new StarTreeBucketCollector(parentCollector) { - String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( - starTree.getField(), - ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(), - metric - ); - SortedNumericStarTreeValuesIterator metricValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues - .getMetricValuesIterator(metricName); - - @Override - public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOException { - growArrays.accept(bucket); - // Advance the valuesIterator to the current bit - if (!metricValuesIterator.advanceExact(starTreeEntryBit)) { - return; // Skip if no entries for this document - } - long metricValue = metricValuesIterator.nextValue(); - updateBucket.accept(bucket, metricValue); - } - }; - } - - /** - * Get the filtered values for the star-tree query - * Cache the results in case of multiple aggregations (if cache is initialized) - * @return FixedBitSet of matched document IDs - */ - public static FixedBitSet getStarTreeFilteredValues(SearchContext context, LeafReaderContext ctx, StarTreeValues starTreeValues) - throws IOException { - FixedBitSet result = context.getStarTreeQueryContext().getStarTreeValues(ctx); - if (result == null) { - result = StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap(), Set.of()); - context.getStarTreeQueryContext().setStarTreeValues(ctx, result); - } - return result; - } -} diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java index 4b4bfa6a915eb..595965c98ea07 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java @@ -26,6 +26,11 @@ public SortedNumericStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { super(docIdSetIterator); } + @Override + public long value() throws IOException { + return nextValue(); + } + public long nextValue() throws IOException { return ((SortedNumericDocValues) docIdSetIterator).nextValue(); } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java 
b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java index 0cddffe5877e9..1605bd9cfc014 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedSetStarTreeValuesIterator.java @@ -29,6 +29,17 @@ public SortedSetStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { super(docIdSetIterator); } + @Override + public long value() throws IOException { + return nextOrd(); + } + + @Override + public boolean advanceExact(int target) throws IOException { + return ((SortedSetDocValues) docIdSetIterator).advanceExact(target); + } + + // TODO : Remove this and merge @org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator to use value() public long nextOrd() throws IOException { return ((SortedSetDocValues) docIdSetIterator).nextOrd(); } @@ -56,4 +67,5 @@ public TermsEnum termsEnum() throws IOException { public TermsEnum intersect(CompiledAutomaton automaton) throws IOException { return ((SortedSetDocValues) docIdSetIterator).intersect(automaton); } + } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java index 32866f3e50092..d9ee67fe1b0d5 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java @@ -45,4 +45,9 @@ public int advance(int target) throws IOException { public long cost() { return docIdSetIterator.cost(); } + + public abstract long value() throws IOException; + + public abstract boolean advanceExact(int target) throws IOException; + } diff --git a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java index f3fc3f4b2aa95..faf3f1bb654c8 100644 --- a/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/NumberFieldMapper.java @@ -1305,7 +1305,7 @@ public static boolean hasDecimalPart(Object number) { /** * Returns -1, 0, or 1 if the value is lower than, equal to, or greater than 0 */ - static double signum(Object value) { + public static double signum(Object value) { if (value instanceof Number) { double doubleValue = ((Number) value).doubleValue(); return Math.signum(doubleValue); diff --git a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java index 0610752e532e7..62c8c0e25596c 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryShardContext.java +++ b/server/src/main/java/org/opensearch/index/query/QueryShardContext.java @@ -76,6 +76,7 @@ import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.lookup.SearchLookup; +import org.opensearch.search.startree.StarTreeQueryContext; import org.opensearch.transport.RemoteClusterAware; import java.io.IOException; @@ -128,6 +129,8 @@ public class QueryShardContext extends 
BaseQueryRewriteContext { private boolean keywordIndexOrDocValuesEnabled; private boolean isInnerHitQuery; + private StarTreeQueryContext starTreeQueryContext; + public QueryShardContext( int shardId, IndexSettings indexSettings, @@ -379,6 +382,14 @@ public > IFD getForField(MappedFieldType fieldType ); } + public StarTreeQueryContext getStarTreeQueryContext() { + return starTreeQueryContext; + } + + public void setStarTreeQueryContext(StarTreeQueryContext starTreeQueryContext) { + this.starTreeQueryContext = starTreeQueryContext; + } + public void addNamedQuery(String name, Query query) { if (query != null) { namedQueries.put(name, query); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index d4380eb09e360..866cf0d62b033 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -77,7 +77,6 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.DerivedFieldResolver; import org.opensearch.index.mapper.DerivedFieldResolverFactory; @@ -141,6 +140,7 @@ import org.opensearch.search.sort.SortBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.search.startree.StarTreeQueryContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.completion.CompletionSuggestion; import org.opensearch.tasks.TaskResourceTrackingService; @@ -1548,15 +1548,11 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (this.indicesService.getCompositeIndexSettings() != null && this.indicesService.getCompositeIndexSettings().isStarTreeIndexCreationEnabled() && StarTreeQueryHelper.isStarTreeSupported(context)) { - try { - StarTreeQueryContext starTreeQueryContext = StarTreeQueryHelper.getStarTreeQueryContext(context, source); - if (starTreeQueryContext != null) { - context.starTreeQueryContext(starTreeQueryContext); - logger.debug("can use star tree"); - } else { - logger.debug("cannot use star tree"); - } - } catch (IOException ignored) {} + StarTreeQueryContext starTreeQueryContext = new StarTreeQueryContext(context, source.query()); + boolean consolidated = starTreeQueryContext.consolidateAllFilters(context); + if (consolidated) { + queryShardContext.setStarTreeQueryContext(starTreeQueryContext); + } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 49672831625e4..451b96dc3cf9c 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -45,7 +45,6 @@ import org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import 
org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding; @@ -68,18 +67,20 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; -import org.opensearch.search.startree.StarTreeFilter; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; import java.io.IOException; import java.util.Collections; +import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Function; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; import static org.opensearch.search.aggregations.bucket.filterrewrite.DateHistogramAggregatorBridge.segmentMatchAll; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * An aggregator for date values. Every date is rounded down using a configured @@ -172,7 +173,9 @@ protected Function bucketOrdProducer() { } }; filterRewriteOptimizationContext = new FilterRewriteOptimizationContext(bridge, parent, subAggregators.length, context); - this.starTreeDateDimension = (context.getStarTreeQueryContext() != null) ? fetchStarTreeCalendarUnit() : null; + this.starTreeDateDimension = (context.getQueryShardContext().getStarTreeQueryContext() != null) + ? fetchStarTreeCalendarUnit() + : null; } @Override @@ -193,7 +196,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol if (optimized) throw new CollectionTerminatedException(); SortedNumericDocValues values = valuesSource.longValues(ctx); - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (preComputeWithStarTree(ctx, supportedStarTree) == true) { throw new CollectionTerminatedException(); @@ -264,7 +267,15 @@ public StarTreeBucketCollector getStarTreeBucketCollector( StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); return new StarTreeBucketCollector( starTreeValues, - StarTreeFilter.getStarTreeResult(starTreeValues, context.getStarTreeQueryContext().getQueryMap(), Set.of(starTreeDateDimension)) + StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + starTreeDateDimension, + List.of(DimensionFilter.MATCH_ALL_DEFAULT) + ), + context + ) ) { @Override public void setSubCollectors() throws IOException { diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java index c9f5bb7f3534b..f71b6679a7c4d 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java @@ -44,7 +44,6 @@ import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import 
org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.fielddata.SortedNumericDoubleValues; @@ -58,12 +57,13 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import java.io.IOException; import java.util.Map; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getStarTreeFilteredValues; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; +import static org.opensearch.search.startree.StarTreeQueryHelper.getStarTreeFilteredValues; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * Aggregate all docs into an average @@ -108,7 +108,7 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
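
For orientation: the metric aggregators touched in this series (avg, max, min, sum, value_count) now share one leaf-collection guard. Below is a minimal illustrative sketch of that shared pattern, not part of the patch itself; it assumes the surrounding Aggregator class, and getStarTreeCollector() ends collection by throwing CollectionTerminatedException once the star-tree values have been consumed:

    // Illustrative guard shared by the metric aggregators after this change.
    // getSupportedStarTree() now reads the StarTreeQueryContext from QueryShardContext.
    public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
        CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext());
        if (supportedStarTree != null) {
            if (parent != null && subAggregators.length == 0) {
                // A parent aggregator triggers star-tree pre-computation for its children.
                return LeafBucketCollector.NO_OP_COLLECTOR;
            }
            // Pre-computes the metric from star-tree values and throws CollectionTerminatedException.
            getStarTreeCollector(ctx, sub, supportedStarTree);
        }
        return getDefaultLeafCollector(ctx, sub);
    }
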
diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index 49aaf5e0670bb..c64a6cf29fb63 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -43,7 +43,6 @@ import org.opensearch.common.util.DoubleArray; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; @@ -57,6 +56,7 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import java.io.IOException; import java.util.Arrays; @@ -64,7 +64,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * Aggregate all docs into a max value @@ -130,14 +130,14 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc } } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators return LeafBucketCollector.NO_OP_COLLECTOR; } - return getStarTreeCollector(ctx, sub, supportedStarTree); + getStarTreeCollector(ctx, sub, supportedStarTree); } return getDefaultLeafCollector(ctx, sub); } @@ -167,21 +167,11 @@ public void collect(int doc, long bucket) throws IOException { }; } - public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) - throws IOException { + public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { AtomicReference max = new AtomicReference<>(maxes.get(0)); - return StarTreeQueryHelper.getStarTreeLeafCollector( - context, - valuesSource, - ctx, - sub, - starTree, - MetricStat.MAX.getTypeName(), - value -> { - max.set(Math.max(max.get(), (NumericUtils.sortableLongToDouble(value)))); - }, - () -> maxes.set(0, max.get()) - ); + StarTreeQueryHelper.getStarTreeLeafCollector(context, valuesSource, ctx, sub, starTree, MetricStat.MAX.getTypeName(), value -> { + max.set(Math.max(max.get(), (NumericUtils.sortableLongToDouble(value)))); + }, () -> maxes.set(0, max.get())); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index febb227dd4e2a..5cdee536cde19 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -43,7 +43,6 @@ import org.opensearch.common.util.DoubleArray; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; @@ -57,13 +56,14 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import java.io.IOException; import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * Aggregate all docs into a min value @@ -129,14 +129,14 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc } } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators return LeafBucketCollector.NO_OP_COLLECTOR; } - return getStarTreeCollector(ctx, sub, supportedStarTree); + getStarTreeCollector(ctx, sub, supportedStarTree); } return getDefaultLeafCollector(ctx, sub); } @@ -164,21 +164,11 @@ public void collect(int doc, long bucket) throws IOException { }; } - public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) - throws IOException { + public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { AtomicReference min = new AtomicReference<>(mins.get(0)); - return StarTreeQueryHelper.getStarTreeLeafCollector( - context, - valuesSource, - ctx, - sub, - starTree, - MetricStat.MIN.getTypeName(), - value -> { - min.set(Math.min(min.get(), (NumericUtils.sortableLongToDouble(value)))); - }, - () -> mins.set(0, min.get()) - ); + StarTreeQueryHelper.getStarTreeLeafCollector(context, valuesSource, ctx, sub, starTree, MetricStat.MIN.getTypeName(), value -> { + min.set(Math.min(min.get(), (NumericUtils.sortableLongToDouble(value)))); + }, () -> mins.set(0, min.get())); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java index 7376cc1e93b41..edcfb61263fc1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java @@ -39,7 +39,6 @@ import org.opensearch.common.util.DoubleArray; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.SortedNumericDoubleValues; import org.opensearch.search.DocValueFormat; import org.opensearch.search.aggregations.Aggregator; @@ -51,11 +50,12 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import java.io.IOException; import java.util.Map; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * Aggregate all docs into a single sum value @@ -98,14 +98,14 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc return LeafBucketCollector.NO_OP_COLLECTOR; } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators return LeafBucketCollector.NO_OP_COLLECTOR; } - return getStarTreeCollector(ctx, sub, supportedStarTree); + getStarTreeCollector(ctx, sub, supportedStarTree); } return getDefaultLeafCollector(ctx, sub); } @@ -140,11 +140,10 @@ public void collect(int doc, long bucket) throws IOException { }; } - public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) - throws IOException { + public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), compensations.get(0)); - return StarTreeQueryHelper.getStarTreeLeafCollector( + StarTreeQueryHelper.getStarTreeLeafCollector( context, valuesSource, ctx, diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index f6f4a8a56eddc..d298361391ad9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -39,7 +39,6 @@ import org.opensearch.common.util.LongArray; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.MetricStat; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.MultiGeoPointValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.search.aggregations.Aggregator; @@ -51,11 +50,12 @@ import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.aggregations.support.ValuesSourceConfig; import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; import java.io.IOException; import java.util.Map; -import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper.getSupportedStarTree; +import static org.opensearch.search.startree.StarTreeQueryHelper.getSupportedStarTree; /** * A field data based aggregator that counts the number of values a specific field has within the aggregation context. @@ -96,14 +96,14 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc if (valuesSource instanceof ValuesSource.Numeric) { - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context); + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators return LeafBucketCollector.NO_OP_COLLECTOR; } - return getStarTreeCollector(ctx, sub, supportedStarTree); + getStarTreeCollector(ctx, sub, supportedStarTree); } final SortedNumericDocValues values = ((ValuesSource.Numeric) valuesSource).longValues(ctx); @@ -145,9 +145,8 @@ public void collect(int doc, long bucket) throws IOException { }; } - public LeafBucketCollector getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) - throws IOException { - return StarTreeQueryHelper.getStarTreeLeafCollector( + public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { + StarTreeQueryHelper.getStarTreeLeafCollector( context, (ValuesSource.Numeric) valuesSource, ctx, diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index b7ea06d2989e5..b539981da4ebd 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -76,7 +76,6 @@ import org.opensearch.search.query.ReduceableSearchResult; import org.opensearch.search.rescore.RescoreContext; import org.opensearch.search.sort.SortAndFormats; -import org.opensearch.search.startree.StarTreeQueryContext; import org.opensearch.search.suggest.SuggestionSearchContext; import java.util.Collection; @@ -126,7 +125,6 @@ public List toInternalAggregations(Collection co private final AtomicBoolean closed = new AtomicBoolean(false); private InnerHitsContext innerHitsContext; private volatile boolean searchTimedOut; - private StarTreeQueryContext starTreeQueryContext; protected SearchContext() {} @@ -531,13 +529,4 @@ public int cardinalityAggregationPruningThreshold() { public boolean keywordIndexOrDocValuesEnabled() { return false; } - - public SearchContext starTreeQueryContext(StarTreeQueryContext starTreeQueryContext) { - this.starTreeQueryContext = starTreeQueryContext; - return this; - } - - public StarTreeQueryContext getStarTreeQueryContext() { - return this.starTreeQueryContext; - } } diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeNodeCollector.java b/server/src/main/java/org/opensearch/search/startree/StarTreeNodeCollector.java new file mode 100644 index 0000000000000..9d3dcd6824874 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeNodeCollector.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.search.startree;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode;
+
+/**
+ * Collects one or more {@link StarTreeNode}s
+ */
+@ExperimentalApi
+public interface StarTreeNodeCollector {
+    /**
+     * Called to collect a {@link StarTreeNode}
+     * @param node : Node to collect
+     */
+    void collectStarTreeNode(StarTreeNode node);
+
+}
diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java
index cda3a25b30e53..ca0ab9ce52f6e 100644
--- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java
+++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryContext.java
@@ -8,72 +8,209 @@
 package org.opensearch.search.startree;

-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.util.FixedBitSet;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.index.codec.composite.CompositeIndexFieldInfo;
+import org.opensearch.index.compositeindex.datacube.DateDimension;
+import org.opensearch.index.compositeindex.datacube.Dimension;
+import org.opensearch.index.compositeindex.datacube.Metric;
+import org.opensearch.index.compositeindex.datacube.MetricStat;
+import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter;
+import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitRounding;
+import org.opensearch.index.mapper.CompositeDataCubeFieldType;
+import org.opensearch.index.query.QueryBuilder;
+import org.opensearch.search.aggregations.AggregatorFactory;
+import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregatorFactory;
+import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.startree.filter.StarTreeFilter;
+import org.opensearch.search.startree.filter.provider.StarTreeFilterProvider;

+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;

 /**
- * Query class for querying star tree data structure.
- *
- * @opensearch.experimental
+ * Stores the star tree related context of a search request.
  */
 @ExperimentalApi
 public class StarTreeQueryContext {

-    /**
-     * Star tree field info
-     * This is used to get the star tree data structure
-     */
-    private final CompositeIndexFieldInfo starTree;
+    private final CompositeDataCubeFieldType compositeMappedFieldType;

     /**
-     * Map of field name to a value to be queried for that field
-     * This is used to filter the data based on the query
+     * Cache for leaf results
+     * This is used to cache the results for each leaf reader context
+     * to avoid reading the filtered values from the leaf reader context multiple times
      */
-    private final Map<String, Long> queryMap;
+    // TODO : Change caching to be based on aggregation specific filters.
+ private final FixedBitSet[] perSegmentNodeIdsCache; - /** - * Cache for leaf results - * This is used to cache the results for each leaf reader context - * to avoid reading the filtered values from the leaf reader context multiple times - */ - private final FixedBitSet[] starTreeValues; - - public StarTreeQueryContext(CompositeIndexFieldInfo starTree, Map queryMap, int numSegmentsCache) { - this.starTree = starTree; - this.queryMap = queryMap; - if (numSegmentsCache > -1) { - starTreeValues = new FixedBitSet[numSegmentsCache]; + private final QueryBuilder baseQueryBuilder; + private StarTreeFilter baseStarTreeFilter; + + // TODO : Implement storing and aggregating aggregation specific filters. + + public StarTreeQueryContext(SearchContext context, QueryBuilder baseQueryBuilder) { + this.baseQueryBuilder = baseQueryBuilder; + // TODO : We need to select the most appropriate one from multiple star tree field types. + compositeMappedFieldType = (CompositeDataCubeFieldType) context.mapperService().getCompositeFieldTypes().iterator().next(); + // need to cache star tree values only for multiple aggregations + boolean cacheStarTreeValues = context.aggregations().factories().getFactories().length > 1; + int cacheSize = cacheStarTreeValues ? context.indexShard().segments(false).size() : -1; + if (cacheSize > -1) { + perSegmentNodeIdsCache = new FixedBitSet[cacheSize]; } else { - starTreeValues = null; + perSegmentNodeIdsCache = null; + } + } + + // TODO : Make changes to change visibility into package private. Handle the same in @org.opensearch.search.SearchServiceStarTreeTests + public StarTreeQueryContext(CompositeDataCubeFieldType compositeMappedFieldType, QueryBuilder baseQueryBuilder, int cacheSize) { + this.compositeMappedFieldType = compositeMappedFieldType; + this.baseQueryBuilder = baseQueryBuilder; + if (cacheSize > -1) { + perSegmentNodeIdsCache = new FixedBitSet[cacheSize]; + } else { + perSegmentNodeIdsCache = null; } } public CompositeIndexFieldInfo getStarTree() { - return starTree; + return new CompositeIndexFieldInfo(compositeMappedFieldType.name(), compositeMappedFieldType.getCompositeIndexType()); } - public Map getQueryMap() { - return queryMap; + public FixedBitSet maybeGetCachedNodeIdsForSegment(int ordinal) { + return perSegmentNodeIdsCache != null ? perSegmentNodeIdsCache[ordinal] : null; } - public FixedBitSet[] getStarTreeValues() { - return starTreeValues; + public FixedBitSet[] getAllCachedValues() { + return perSegmentNodeIdsCache; } - public FixedBitSet getStarTreeValues(LeafReaderContext ctx) { - if (starTreeValues != null) { - return starTreeValues[ctx.ord]; + public void maybeSetCachedNodeIdsForSegment(int key, FixedBitSet values) { + if (perSegmentNodeIdsCache != null) { + perSegmentNodeIdsCache[key] = values; } - return null; } - public void setStarTreeValues(LeafReaderContext ctx, FixedBitSet values) { - if (starTreeValues != null) { - starTreeValues[ctx.ord] = values; + /** + * Generates the Base StarTreeFilter and then recursively merges + * any aggregation specific STF. + * @return true if recursively all filters were consolidated, else false. 
+     */
+    public boolean consolidateAllFilters(SearchContext context) {
+        // Validate that the fields and metrics required by aggregations are supported in star tree
+        for (AggregatorFactory aggregatorFactory : context.aggregations().factories().getFactories()) {
+            // first check whether the aggregation is a metric aggregation
+            if (validateStarTreeMetricSupport(compositeMappedFieldType, aggregatorFactory)) {
+                continue;
+            }
+
+            // if not a metric aggregation, check for an applicable date histogram shape
+            if (validateDateHistogramSupport(compositeMappedFieldType, aggregatorFactory)) {
+                continue;
+            }
+            return false;
+        }
+
+        // Generate the base Star Tree Filter
+        if (baseQueryBuilder != null) {
+            baseStarTreeFilter = getStarTreeFilter(context, baseQueryBuilder, compositeMappedFieldType);
+            return baseStarTreeFilter != null; // False when the base query is not supported by the star tree filter.
+        }
+        // TODO : Generate StarTreeFilter specific to aggregations by merging base and their parents.
+        return true;
+    }
+
+    public StarTreeFilter getBaseQueryStarTreeFilter() {
+        if (baseStarTreeFilter == null) {
+            return new StarTreeFilter(Collections.emptyMap());
+        }
+        return baseStarTreeFilter;
+    }
+
+    // TODO : Push this validation down to a common method in AggregatorFactory or an equivalent place.
+    private static boolean validateStarTreeMetricSupport(
+        CompositeDataCubeFieldType compositeIndexFieldInfo,
+        AggregatorFactory aggregatorFactory
+    ) {
+        if (aggregatorFactory instanceof MetricAggregatorFactory && aggregatorFactory.getSubFactories().getFactories().length == 0) {
+            String field;
+            Map<String, List<MetricStat>> supportedMetrics = compositeIndexFieldInfo.getMetrics()
+                .stream()
+                .collect(Collectors.toMap(Metric::getField, Metric::getMetrics));
+
+            MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat();
+            field = ((MetricAggregatorFactory) aggregatorFactory).getField();
+
+            return field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat);
+        }
+        return false;
+    }
+
+    private StarTreeFilter getStarTreeFilter(
+        SearchContext context,
+        QueryBuilder queryBuilder,
+        CompositeDataCubeFieldType compositeMappedFieldType
+    ) {
+        StarTreeFilterProvider starTreeFilterProvider = StarTreeFilterProvider.SingletonFactory.getProvider(queryBuilder);
+        // Return null when the query builder's support is not implemented.
+ if (starTreeFilterProvider == null) { + return null; + } + try { + return starTreeFilterProvider.getFilter(context, queryBuilder, compositeMappedFieldType); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private static boolean validateDateHistogramSupport( + CompositeDataCubeFieldType compositeIndexFieldInfo, + AggregatorFactory aggregatorFactory + ) { + if (!(aggregatorFactory instanceof DateHistogramAggregatorFactory dateHistogramAggregatorFactory) + || aggregatorFactory.getSubFactories().getFactories().length < 1) { + return false; + } + + // Find the DateDimension in the dimensions list + DateDimension starTreeDateDimension = null; + for (Dimension dimension : compositeIndexFieldInfo.getDimensions()) { + if (dimension instanceof DateDimension) { + starTreeDateDimension = (DateDimension) dimension; + break; + } + } + + // If no DateDimension is found, validation fails + if (starTreeDateDimension == null) { + return false; + } + + // Ensure the rounding is not null + if (dateHistogramAggregatorFactory.getRounding() == null) { + return false; + } + + // Find the closest valid interval in the DateTimeUnitRounding class associated with star tree + DateTimeUnitRounding rounding = starTreeDateDimension.findClosestValidInterval( + new DateTimeUnitAdapter(dateHistogramAggregatorFactory.getRounding()) + ); + if (rounding == null) { + return false; + } + + // Validate all sub-factories + for (AggregatorFactory subFactory : aggregatorFactory.getSubFactories().getFactories()) { + if (!validateStarTreeMetricSupport(compositeIndexFieldInfo, subFactory)) { + return false; + } + } + return true; + } + } diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java new file mode 100644 index 0000000000000..edbccb53853d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java @@ -0,0 +1,203 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.startree; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentReader; +import org.apache.lucene.search.CollectionTerminatedException; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; +import org.opensearch.index.codec.composite.CompositeIndexReader; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.query.QueryShardContext; +import org.opensearch.search.aggregations.LeafBucketCollector; +import org.opensearch.search.aggregations.StarTreeBucketCollector; +import org.opensearch.search.aggregations.support.ValuesSource; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.filter.DimensionFilter; +import org.opensearch.search.startree.filter.StarTreeFilter; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * Helper class for building star-tree query + * + * @opensearch.internal + * @opensearch.experimental + */ +public class StarTreeQueryHelper { + + private static StarTreeValues starTreeValues; + + /** + * Checks if the search context can be supported by star-tree + */ + public static boolean isStarTreeSupported(SearchContext context) { + return context.aggregations() != null && context.mapperService().isCompositeIndexPresent() && context.parsedPostFilter() == null; + } + + public static CompositeIndexFieldInfo getSupportedStarTree(QueryShardContext context) { + StarTreeQueryContext starTreeQueryContext = context.getStarTreeQueryContext(); + return (starTreeQueryContext != null) ? 
starTreeQueryContext.getStarTree() : null; + } + + public static StarTreeValues getStarTreeValues(LeafReaderContext context, CompositeIndexFieldInfo starTree) throws IOException { + SegmentReader reader = Lucene.segmentReader(context.reader()); + if (!(reader.getDocValuesReader() instanceof CompositeIndexReader)) { + return null; + } + CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + return (StarTreeValues) starTreeDocValuesReader.getCompositeIndexValues(starTree); + } + + /** + * Get the star-tree leaf collector + * This collector computes the aggregation prematurely and invokes an early termination collector + */ + public static void getStarTreeLeafCollector( + SearchContext context, + ValuesSource.Numeric valuesSource, + LeafReaderContext ctx, + LeafBucketCollector sub, + CompositeIndexFieldInfo starTree, + String metric, + Consumer valueConsumer, + Runnable finalConsumer + ) throws IOException { + StarTreeValues starTreeValues = getStarTreeValues(ctx, starTree); + assert starTreeValues != null; + String fieldName = ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(); + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues(starTree.getField(), fieldName, metric); + + SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator( + metricName + ); + // Obtain a FixedBitSet of matched star tree document IDs + FixedBitSet filteredValues = getStarTreeFilteredValues(context, ctx, starTreeValues); + + int numBits = filteredValues.length(); // Get the number of the filtered values (matching docs) + if (numBits > 0) { + // Iterate over the filtered values + for (int bit = filteredValues.nextSetBit(0); bit != DocIdSetIterator.NO_MORE_DOCS; bit = (bit + 1 < numBits) + ? 
filteredValues.nextSetBit(bit + 1) + : DocIdSetIterator.NO_MORE_DOCS) { + // Advance to the entryId in the valuesIterator + if (valuesIterator.advanceExact(bit) == false) { + continue; // Skip if no more entries + } + + // Iterate over the values for the current entryId + for (int i = 0, count = valuesIterator.entryValueCount(); i < count; i++) { + long value = valuesIterator.nextValue(); + valueConsumer.accept(value); // Apply the consumer operation (e.g., max, sum) + } + } + } + + // Call the final consumer after processing all entries + finalConsumer.run(); + + // FIXME : Remove after @msfroh PR for precompute + // Terminate after pre-computing aggregation + throw new CollectionTerminatedException(); + } + + /** + * Get the filtered values for the star-tree query + * Cache the results in case of multiple aggregations (if cache is initialized) + * @return FixedBitSet of matched document IDs + */ + public static FixedBitSet getStarTreeFilteredValues(SearchContext context, LeafReaderContext ctx, StarTreeValues starTreeValues) + throws IOException { + FixedBitSet result = context.getQueryShardContext().getStarTreeQueryContext().maybeGetCachedNodeIdsForSegment(ctx.ord); + if (result == null) { + result = StarTreeTraversalUtil.getStarTreeResult( + starTreeValues, + context.getQueryShardContext().getStarTreeQueryContext().getBaseQueryStarTreeFilter(), + context + ); + } + context.getQueryShardContext().getStarTreeQueryContext().maybeSetCachedNodeIdsForSegment(ctx.ord, result); + return result; + } + + public static Dimension getMatchingDimensionOrThrow(String dimensionName, List orderedDimensions) { + Dimension matchingDimension = getMatchingDimensionOrNull(dimensionName, orderedDimensions); + if (matchingDimension == null) { + throw new IllegalStateException("No matching dimension found for [" + dimensionName + "]"); + } + return matchingDimension; + } + + public static Dimension getMatchingDimensionOrNull(String dimensionName, List orderedDimensions) { + List matchingDimensions = orderedDimensions.stream().filter(x -> x.getField().equals(dimensionName)).toList(); + if (matchingDimensions.size() != 1) { + return null; + } + return matchingDimensions.get(0); + } + + public static StarTreeBucketCollector getStarTreeBucketMetricCollector( + CompositeIndexFieldInfo starTree, + String metric, + ValuesSource.Numeric valuesSource, + StarTreeBucketCollector parentCollector, + Consumer growArrays, + BiConsumer updateBucket + ) throws IOException { + assert parentCollector != null; + return new StarTreeBucketCollector(parentCollector) { + String metricName = StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues( + starTree.getField(), + ((ValuesSource.Numeric.FieldData) valuesSource).getIndexFieldName(), + metric + ); + SortedNumericStarTreeValuesIterator metricValuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues + .getMetricValuesIterator(metricName); + + @Override + public void collectStarTreeEntry(int starTreeEntryBit, long bucket) throws IOException { + growArrays.accept(bucket); + // Advance the valuesIterator to the current bit + if (!metricValuesIterator.advanceExact(starTreeEntryBit)) { + return; // Skip if no entries for this document + } + long metricValue = metricValuesIterator.nextValue(); + updateBucket.accept(bucket, metricValue); + } + }; + } + + public static StarTreeFilter mergeDimensionFilterIfNotExists( + StarTreeFilter baseStarTreeFilter, + String dimensionToMerge, + List dimensionFiltersToMerge + ) { + Map> dimensionFilterMap = new 
HashMap<>(baseStarTreeFilter.getDimensions().size()); + for (String baseDimension : baseStarTreeFilter.getDimensions()) { + dimensionFilterMap.put(baseDimension, baseStarTreeFilter.getFiltersForDimension(baseDimension)); + } + // Don't add groupBy when already present in base filter. + if (!dimensionFilterMap.containsKey(dimensionToMerge)) { + dimensionFilterMap.put(dimensionToMerge, dimensionFiltersToMerge); + } + return new StarTreeFilter(dimensionFilterMap); + } + +} diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java b/server/src/main/java/org/opensearch/search/startree/StarTreeTraversalUtil.java similarity index 76% rename from server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java rename to server/src/main/java/org/opensearch/search/startree/StarTreeTraversalUtil.java index 1629b9d0c1db4..cf9c125e84b79 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeFilter.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeTraversalUtil.java @@ -19,6 +19,9 @@ import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.filter.DimensionFilter; +import org.opensearch.search.startree.filter.StarTreeFilter; import java.io.IOException; import java.util.ArrayDeque; @@ -27,7 +30,6 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Queue; import java.util.Set; import java.util.stream.Collectors; @@ -40,20 +42,24 @@ * @opensearch.experimental * @opensearch.internal */ -public class StarTreeFilter { - private static final Logger logger = LogManager.getLogger(StarTreeFilter.class); +public class StarTreeTraversalUtil { + private static final Logger logger = LogManager.getLogger(StarTreeTraversalUtil.class); /** - * First go over the star tree and try to match as many dimensions as possible - * For the remaining columns, use star-tree doc values to match them + * First go over the star tree and try to match as many dimensions as possible + * For the remaining columns, use star-tree doc values to match them */ - public static FixedBitSet getStarTreeResult( - StarTreeValues starTreeValues, - Map predicateEvaluators, - Set groupByField - ) throws IOException { - Map queryMap = predicateEvaluators != null ? 
predicateEvaluators : Collections.emptyMap();
-        StarTreeResult starTreeResult = traverseStarTree(starTreeValues, queryMap, groupByField);
+    public static FixedBitSet getStarTreeResult(StarTreeValues starTreeValues, StarTreeFilter starTreeFilter, SearchContext searchContext)
+        throws IOException {
+
+        // Initialising all dimension filters for this segment
+        for (String dimension : starTreeFilter.getDimensions()) {
+            for (DimensionFilter dimensionFilter : starTreeFilter.getFiltersForDimension(dimension)) {
+                dimensionFilter.initialiseForSegment(starTreeValues, searchContext);
+            }
+        }
+
+        StarTreeResult starTreeResult = traverseStarTree(starTreeValues, starTreeFilter);

         // Initialize FixedBitSet with size maxMatchedDoc + 1
         FixedBitSet bitSet = new FixedBitSet(starTreeResult.maxMatchedDoc + 1);
@@ -78,11 +84,9 @@ public static FixedBitSet getStarTreeResult(
         for (String remainingPredicateColumn : starTreeResult.remainingPredicateColumns) {
             logger.debug("remainingPredicateColumn : {}, maxMatchedDoc : {} ", remainingPredicateColumn, starTreeResult.maxMatchedDoc);

-            SortedNumericStarTreeValuesIterator ndv = (SortedNumericStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator(
-                remainingPredicateColumn
-            );
-
-            long queryValue = queryMap.get(remainingPredicateColumn); // Get the query value directly
+            StarTreeValuesIterator valuesIterator = starTreeValues.getDimensionValuesIterator(remainingPredicateColumn);
+            // Get the dimension filters for the remaining predicate column
+            List<DimensionFilter> dimensionFilters = starTreeFilter.getFiltersForDimension(remainingPredicateColumn);

             // Clear the temporary bit set before reuse
             tempBitSet.clear(0, starTreeResult.maxMatchedDoc + 1);
@@ -92,14 +96,12 @@ public static FixedBitSet getStarTreeResult(
             for (int entryId = bitSet.nextSetBit(0); entryId != DocIdSetIterator.NO_MORE_DOCS; entryId = (entryId + 1 < bitSet.length())
                 ? bitSet.nextSetBit(entryId + 1)
                 : DocIdSetIterator.NO_MORE_DOCS) {
-                if (ndv.advance(entryId) != StarTreeValuesIterator.NO_MORE_ENTRIES) {
-                    final int valuesCount = ndv.entryValueCount();
-                    for (int i = 0; i < valuesCount; i++) {
-                        long value = ndv.nextValue();
-                        // Compare the value with the query value
-                        if (value == queryValue) {
-                            tempBitSet.set(entryId); // Set bit for the matching entryId
-                            break; // No need to check other values for this entryId
+                if (valuesIterator.advanceExact(entryId)) {
+                    long value = valuesIterator.value();
+                    for (DimensionFilter dimensionFilter : dimensionFilters) {
+                        if (dimensionFilter.matchDimValue(value, starTreeValues)) {
+                            tempBitSet.set(entryId); // Set bit for the matching entryId
+                            break;
                         }
                     }
                 }
@@ -117,8 +119,7 @@
      * Helper method to traverse the star tree, get matching documents and keep track of all the
      * predicate dimensions that are not matched.
*/ - private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Map queryMap, Set groupbyField) - throws IOException { + private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, StarTreeFilter starTreeFilter) throws IOException { DocIdSetBuilder docsWithField = new DocIdSetBuilder(starTreeValues.getStarTreeDocumentCount()); DocIdSetBuilder.BulkAdder adder; Set globalRemainingPredicateColumns = null; @@ -130,8 +131,7 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma Queue queue = new ArrayDeque<>(); queue.add(starTree); int currentDimensionId = -1; - Set remainingPredicateColumns = new HashSet<>(queryMap.keySet()); - Set remainingGroupByColumns = new HashSet<>(groupbyField); + Set remainingPredicateColumns = new HashSet<>(starTreeFilter.getDimensions()); int matchedDocsCountInStarTree = 0; int maxDocNum = -1; StarTreeNode starTreeNode; @@ -142,14 +142,13 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma if (dimensionId > currentDimensionId) { String dimension = dimensionNames.get(dimensionId); remainingPredicateColumns.remove(dimension); - remainingGroupByColumns.remove(dimension); if (foundLeafNode && globalRemainingPredicateColumns == null) { globalRemainingPredicateColumns = new HashSet<>(remainingPredicateColumns); } currentDimensionId = dimensionId; } - if (remainingPredicateColumns.isEmpty() && remainingGroupByColumns.isEmpty()) { + if (remainingPredicateColumns.isEmpty()) { int docId = starTreeNode.getAggregatedDocId(); docIds.add(docId); matchedDocsCountInStarTree++; @@ -168,18 +167,20 @@ private static StarTreeResult traverseStarTree(StarTreeValues starTreeValues, Ma String childDimension = dimensionNames.get(dimensionId + 1); StarTreeNode starNode = null; - if (((globalRemainingPredicateColumns == null || !globalRemainingPredicateColumns.contains(childDimension)) - && !remainingGroupByColumns.contains(childDimension))) { + if (globalRemainingPredicateColumns == null || !globalRemainingPredicateColumns.contains(childDimension)) { starNode = starTreeNode.getChildStarNode(); } if (remainingPredicateColumns.contains(childDimension)) { - long queryValue = queryMap.get(childDimension); // Get the query value directly from the map - StarTreeNode matchingChild = starTreeNode.getChildForDimensionValue(queryValue); - if (matchingChild != null) { - queue.add(matchingChild); - foundLeafNode |= matchingChild.isLeaf(); + List dimensionFilters = starTreeFilter.getFiltersForDimension(childDimension); + final boolean[] tempFoundLeafNodes = new boolean[1]; + for (DimensionFilter dimensionFilter : dimensionFilters) { + dimensionFilter.matchStarTreeNodes(starTreeNode, starTreeValues, node -> { + queue.add(node); + tempFoundLeafNodes[0] |= node.isLeaf(); + }); } + foundLeafNode |= tempFoundLeafNodes[0]; } else { if (starNode != null) { queue.add(starNode); diff --git a/server/src/main/java/org/opensearch/search/startree/filter/DimensionFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/DimensionFilter.java new file mode 100644 index 0000000000000..64f971a58f216 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/DimensionFilter.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.search.startree.filter;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues;
+import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode;
+import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType;
+import org.opensearch.search.internal.SearchContext;
+import org.opensearch.search.startree.StarTreeNodeCollector;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+/**
+ * Contains the logic to filter over a dimension either in the StarTree Index or in its dimension DocValues
+ */
+@ExperimentalApi
+public interface DimensionFilter {
+
+    DimensionFilter MATCH_ALL_DEFAULT = new DimensionFilter() {
+        @Override
+        public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) throws IOException {
+
+        }
+
+        @Override
+        public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector)
+            throws IOException {
+            if (parentNode != null) {
+                for (Iterator<? extends StarTreeNode> it = parentNode.getChildrenIterator(); it.hasNext();) {
+                    StarTreeNode starTreeNode = it.next();
+                    if (starTreeNode.getStarTreeNodeType() == StarTreeNodeType.DEFAULT.getValue()) {
+                        collector.collectStarTreeNode(starTreeNode);
+                    }
+                }
+            }
+        }
+
+        @Override
+        public boolean matchDimValue(long ordinal, StarTreeValues starTreeValues) {
+            return true;
+        }
+    };
+
+    /**
+     * Converts parsed user values to segment-specific ordinals; other init actions can be performed here as well.
+     * @param starTreeValues : Segment specific star tree root node and other metadata
+     * @param searchContext : Search context
+     * @throws IOException :
+     */
+    void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) throws IOException;
+
+    /**
+     * Called when matching a dimension's values in the star tree index.
+     * @param parentNode : StarTreeNode below which the dimension to be filtered is present.
+     * @param starTreeValues : Segment specific star tree root node and other metadata
+     * @param collector : Collector which collates the matched StarTreeNodes
+     * @throws IOException :
+     */
+    void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector) throws IOException;
+
+    /**
+     * Called when a dimension is not found in the star tree index and needs to be matched by iterating its docValues
+     * @param ordinal : Value to Match
+     * @param starTreeValues : Segment specific star tree root node and other metadata
+     * @return : true if matches, else false.
+     */
+    boolean matchDimValue(long ordinal, StarTreeValues starTreeValues);
+
+    /**
+     * Represents how to match a value when comparing during StarTreeTraversal
+     */
+    @ExperimentalApi
+    enum MatchType {
+        GT,
+        LT,
+        GTE,
+        LTE,
+        EXACT
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java
new file mode 100644
index 0000000000000..28ea261ca1e56
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/startree/filter/ExactMatchDimFilter.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.search.startree.filter; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeNodeCollector; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; + +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.TreeSet; + +/** + * Handles Term and Terms query like search in StarTree Dimension filtering. + */ +@ExperimentalApi +public class ExactMatchDimFilter implements DimensionFilter { + + private final String dimensionName; + + private final List rawValues; + + // Order is essential for successive binary search + private TreeSet convertedOrdinals; + + public ExactMatchDimFilter(String dimensionName, List valuesToMatch) { + this.dimensionName = dimensionName; + this.rawValues = valuesToMatch; + } + + @Override + public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) { + convertedOrdinals = new TreeSet<>(); + Dimension matchedDim = StarTreeQueryHelper.getMatchingDimensionOrThrow( + dimensionName, + starTreeValues.getStarTreeField().getDimensionsOrder() + ); + DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + searchContext.mapperService().fieldType(dimensionName) + ); + for (Object rawValue : rawValues) { + Optional ordinal = dimensionFilterMapper.getMatchingOrdinal( + matchedDim.getField(), + rawValue, + starTreeValues, + MatchType.EXACT + ); + // Numeric type returning negative ordinal ( same as their value ) is valid + // Whereas Keyword type returning -ve ordinal indicates it doesn't exist in Star Tree Dimension values. + ordinal.ifPresent(aLong -> convertedOrdinals.add(aLong)); + } + } + + @Override + public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector) + throws IOException { + if (parentNode != null) { + StarTreeNode lastMatchedNode = null; + for (long ordinal : convertedOrdinals) { + lastMatchedNode = parentNode.getChildForDimensionValue(ordinal, lastMatchedNode); + if (lastMatchedNode != null) { + collector.collectStarTreeNode(lastMatchedNode); + } + } + } + } + + @Override + public boolean matchDimValue(long ordinal, StarTreeValues starTreeValues) { + return convertedOrdinals.contains(ordinal); + } +} diff --git a/server/src/main/java/org/opensearch/search/startree/filter/MatchNoneFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/MatchNoneFilter.java new file mode 100644 index 0000000000000..3066b4d7a8a3f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/MatchNoneFilter.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.startree.filter; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeNodeCollector; + +/** + * Filter which matches no StarTreeNodes. + */ +@ExperimentalApi +public class MatchNoneFilter implements DimensionFilter { + @Override + public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) { + // Nothing to do as we won't match anything. + } + + @Override + public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector) { + // Don't match any star tree node. + } + + @Override + public boolean matchDimValue(long ordinal, StarTreeValues starTreeValues) { + return false; + } +} diff --git a/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java new file mode 100644 index 0000000000000..fecf1a9ebf76b --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/RangeMatchDimFilter.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.startree.filter; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeNodeCollector; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; + +import java.io.IOException; +import java.util.Optional; + +/** + * Performs range match based on the params of @{@link org.opensearch.index.query.RangeQueryBuilder} + * Also, contains logic to skip performing range search if it's sure that it won't be found in Star Tree. + */ +@ExperimentalApi +public class RangeMatchDimFilter implements DimensionFilter { + + private final String dimensionName; + + private final Object low; + private final Object high; + private final boolean includeLow; + private final boolean includeHigh; + + private Long lowOrdinal; + private Long highOrdinal; + + private boolean skipRangeCollection = false; + + public RangeMatchDimFilter(String dimensionName, Object low, Object high, boolean includeLow, boolean includeHigh) { + this.dimensionName = dimensionName; + this.low = low; + this.high = high; + this.includeLow = includeLow; + this.includeHigh = includeHigh; + } + + @Override + public void initialiseForSegment(StarTreeValues starTreeValues, SearchContext searchContext) { + skipRangeCollection = false; + DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + searchContext.mapperService().fieldType(dimensionName) + ); + lowOrdinal = 0L; + if (low != null) { + MatchType lowMatchType = includeLow ? 
MatchType.GTE : MatchType.GT; + Optional lowOrdinalFound = dimensionFilterMapper.getMatchingOrdinal(dimensionName, low, starTreeValues, lowMatchType); + if (lowOrdinalFound.isPresent()) { + lowOrdinal = lowOrdinalFound.get(); + } else { + // This is only valid for Non-numeric fields. + // High can't be found since nothing >= low exists. + lowOrdinal = highOrdinal = Long.MAX_VALUE; + skipRangeCollection = true; + return; + } + } + highOrdinal = Long.MAX_VALUE; + if (high != null) { + MatchType highMatchType = includeHigh ? MatchType.LTE : MatchType.LT; + Optional highOrdinalFound = dimensionFilterMapper.getMatchingOrdinal(dimensionName, high, starTreeValues, highMatchType); + highOrdinalFound.ifPresent(ord -> highOrdinal = ord); + } + } + + @Override + public void matchStarTreeNodes(StarTreeNode parentNode, StarTreeValues starTreeValues, StarTreeNodeCollector collector) + throws IOException { + if (parentNode != null && !skipRangeCollection) { + parentNode.collectChildrenInRange(lowOrdinal, highOrdinal, collector); + } + } + + @Override + public boolean matchDimValue(long ordinal, StarTreeValues starTreeValues) { + return lowOrdinal <= ordinal && ordinal <= highOrdinal; + } + +} diff --git a/server/src/main/java/org/opensearch/search/startree/filter/StarTreeFilter.java b/server/src/main/java/org/opensearch/search/startree/filter/StarTreeFilter.java new file mode 100644 index 0000000000000..38a1f092adc6f --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/StarTreeFilter.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.startree.filter; + +import org.opensearch.common.annotation.ExperimentalApi; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Container for intermediate/consolidated dimension filters that will be applied for a query in star tree traversal. + */ +@ExperimentalApi +public class StarTreeFilter { + + private final Map> dimensionFilterMap; + + public StarTreeFilter(Map> dimensionFilterMap) { + this.dimensionFilterMap = dimensionFilterMap; + } + + public List getFiltersForDimension(String dimension) { + return dimensionFilterMap.get(dimension); + } + + public Set getDimensions() { + return dimensionFilterMap.keySet(); + } + // TODO : Implement Merging of 2 Star Tree Filters + // This would also involve merging 2 different types of dimension filters. + // It also brings in the challenge of sorting input values in user query for efficient merging. + // Merging Range with Term and Range with Range and so on. + // All these will be implemented post OS 2.19 + +} diff --git a/server/src/main/java/org/opensearch/search/startree/filter/package-info.java b/server/src/main/java/org/opensearch/search/startree/filter/package-info.java new file mode 100644 index 0000000000000..565996d9bfb21 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Star Tree Dimension Filters */ +package org.opensearch.search.startree.filter; diff --git a/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java b/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java new file mode 100644 index 0000000000000..8afdb00864b22 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/provider/DimensionFilterMapper.java @@ -0,0 +1,410 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.startree.filter.provider; + +import org.apache.lucene.document.DoublePoint; +import org.apache.lucene.document.FloatPoint; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.sandbox.document.HalfFloatPoint; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.NumericUtils; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.lucene.BytesRefs; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; +import org.opensearch.index.mapper.KeywordFieldMapper.KeywordFieldType; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.NumberFieldMapper.NumberFieldType; +import org.opensearch.search.startree.filter.DimensionFilter; +import org.opensearch.search.startree.filter.ExactMatchDimFilter; +import org.opensearch.search.startree.filter.MatchNoneFilter; +import org.opensearch.search.startree.filter.RangeMatchDimFilter; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.BYTE; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.DOUBLE; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.FLOAT; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.HALF_FLOAT; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.INTEGER; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.LONG; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.SHORT; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.hasDecimalPart; +import static org.opensearch.index.mapper.NumberFieldMapper.NumberType.signum; + +/** + * Generates the @{@link DimensionFilter} raw values and the @{@link MappedFieldType} of the dimension. + */ +@ExperimentalApi +public interface DimensionFilterMapper { + /** + * Generates @{@link ExactMatchDimFilter} from Term/Terms query input. + * @param mappedFieldType: + * @param rawValues: + * @return : + */ + DimensionFilter getExactMatchFilter(MappedFieldType mappedFieldType, List rawValues); + + /** + * Generates @{@link RangeMatchDimFilter} from Range query input. 
+ * @param mappedFieldType: + * @param rawLow: + * @param rawHigh: + * @param includeLow: + * @param includeHigh: + * @return : + */ + DimensionFilter getRangeMatchFilter( + MappedFieldType mappedFieldType, + Object rawLow, + Object rawHigh, + boolean includeLow, + boolean includeHigh + ); + + /** + * Called during conversion from parsedUserInput to segmentOrdinal for every segment. + * @param dimensionName: + * @param value: + * @param starTreeValues: + * @param matchType: + * @return : + */ + Optional getMatchingOrdinal( + String dimensionName, + Object value, + StarTreeValues starTreeValues, + DimensionFilter.MatchType matchType + ); + + /** + * Singleton Factory for @{@link DimensionFilterMapper} + */ + class Factory { + + private static final Map DIMENSION_FILTER_MAPPINGS = Map.of( + BYTE.typeName(), + new IntegerFieldMapperNumeric(), + SHORT.typeName(), + new IntegerFieldMapperNumeric(), + INTEGER.typeName(), + new IntegerFieldMapperNumeric(), + LONG.typeName(), + new SignedLongFieldMapperNumeric(), + HALF_FLOAT.typeName(), + new HalfFloatFieldMapperNumeric(), + FLOAT.typeName(), + new FloatFieldMapperNumeric(), + DOUBLE.typeName(), + new DoubleFieldMapperNumeric(), + org.opensearch.index.mapper.KeywordFieldMapper.CONTENT_TYPE, + new KeywordFieldMapper() + ); + + public static DimensionFilterMapper fromMappedFieldType(MappedFieldType mappedFieldType) { + if (mappedFieldType != null) { + return DIMENSION_FILTER_MAPPINGS.get(mappedFieldType.typeName()); + } + return null; + } + } + +} + +abstract class NumericMapper implements DimensionFilterMapper { + + @Override + public Optional getMatchingOrdinal( + String dimensionName, + Object value, + StarTreeValues starTreeValues, + DimensionFilter.MatchType matchType + ) { + // Casting to long ensures that all numeric fields have been converted to equivalent long at request parsing time. + return Optional.of((long) value); + } +} + +abstract class NumericNonDecimalMapper extends NumericMapper { + + @Override + public DimensionFilter getExactMatchFilter(MappedFieldType mappedFieldType, List rawValues) { + NumberFieldType numberFieldType = (NumberFieldType) mappedFieldType; + List convertedValues = new ArrayList<>(rawValues.size()); + for (Object rawValue : rawValues) { + convertedValues.add(numberFieldType.numberType().parse(rawValue, true).longValue()); + } + return new ExactMatchDimFilter(mappedFieldType.name(), convertedValues); + } + + @Override + public DimensionFilter getRangeMatchFilter( + MappedFieldType mappedFieldType, + Object rawLow, + Object rawHigh, + boolean includeLow, + boolean includeHigh + ) { + NumberFieldType numberFieldType = (NumberFieldType) mappedFieldType; + + Long parsedLow = rawLow == null ? defaultMinimum() : numberFieldType.numberType().parse(rawLow, true).longValue(); + Long parsedHigh = rawHigh == null ? 
defaultMaximum() : numberFieldType.numberType().parse(rawHigh, true).longValue(); + + boolean lowerTermHasDecimalPart = hasDecimalPart(parsedLow); + if ((lowerTermHasDecimalPart == false && includeLow == false) || (lowerTermHasDecimalPart && signum(parsedLow) > 0)) { + if (parsedLow.equals(defaultMaximum())) { + return new MatchNoneFilter(); + } + ++parsedLow; + } + boolean upperTermHasDecimalPart = hasDecimalPart(parsedHigh); + if ((upperTermHasDecimalPart == false && includeHigh == false) || (upperTermHasDecimalPart && signum(parsedHigh) < 0)) { + if (parsedHigh.equals(defaultMinimum())) { + return new MatchNoneFilter(); + } + --parsedHigh; + } + return new RangeMatchDimFilter(mappedFieldType.name(), parsedLow, parsedHigh, true, true); + } + + abstract Long defaultMinimum(); + + abstract Long defaultMaximum(); + +} + +class IntegerFieldMapperNumeric extends NumericNonDecimalMapper { + @Override + Long defaultMinimum() { + return (long) Integer.MIN_VALUE; + } + + @Override + Long defaultMaximum() { + return (long) Integer.MAX_VALUE; + } +} + +class SignedLongFieldMapperNumeric extends NumericNonDecimalMapper { + @Override + Long defaultMinimum() { + return Long.MIN_VALUE; + } + + @Override + Long defaultMaximum() { + return Long.MAX_VALUE; + } +} + +abstract class NumericDecimalFieldMapper extends NumericMapper { + + @Override + public DimensionFilter getExactMatchFilter(MappedFieldType mappedFieldType, List rawValues) { + NumberFieldType numberFieldType = (NumberFieldType) mappedFieldType; + List convertedValues = new ArrayList<>(rawValues.size()); + for (Object rawValue : rawValues) { + convertedValues.add(convertToDocValues(numberFieldType.numberType().parse(rawValue, true))); + } + return new ExactMatchDimFilter(mappedFieldType.name(), convertedValues); + } + + @Override + public DimensionFilter getRangeMatchFilter( + MappedFieldType mappedFieldType, + Object rawLow, + Object rawHigh, + boolean includeLow, + boolean includeHigh + ) { + NumberFieldType numberFieldType = (NumberFieldType) mappedFieldType; + Number l = Long.MIN_VALUE; + Number u = Long.MAX_VALUE; + if (rawLow != null) { + l = numberFieldType.numberType().parse(rawLow, false); + if (includeLow == false) { + l = getNextHigh(l); + } + l = convertToDocValues(l); + } + if (rawHigh != null) { + u = numberFieldType.numberType().parse(rawHigh, false); + if (includeHigh == false) { + u = getNextLow(u); + } + u = convertToDocValues(u); + } + return new RangeMatchDimFilter(numberFieldType.name(), l, u, true, true); + } + + abstract long convertToDocValues(Number parsedValue); + + abstract Number getNextLow(Number parsedValue); + + abstract Number getNextHigh(Number parsedValue); + +} + +class HalfFloatFieldMapperNumeric extends NumericDecimalFieldMapper { + @Override + long convertToDocValues(Number parsedValue) { + return HalfFloatPoint.halfFloatToSortableShort((Float) parsedValue); + } + + @Override + Number getNextLow(Number parsedValue) { + return HalfFloatPoint.nextDown((Float) parsedValue); + } + + @Override + Number getNextHigh(Number parsedValue) { + return HalfFloatPoint.nextUp((Float) parsedValue); + } +} + +class FloatFieldMapperNumeric extends NumericDecimalFieldMapper { + @Override + long convertToDocValues(Number parsedValue) { + return NumericUtils.floatToSortableInt((Float) parsedValue); + } + + @Override + Number getNextLow(Number parsedValue) { + return FloatPoint.nextDown((Float) parsedValue); + } + + @Override + Number getNextHigh(Number parsedValue) { + return FloatPoint.nextUp((Float) parsedValue); + } +} + 
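A quick aside on why the decimal mappers above and below can compare encoded ordinals directly: Lucene's sortable encodings are order-preserving, so an exclusive bound can be rewritten as an inclusive bound on the adjacent representable value. A minimal sketch of that property (illustrative only, not part of the patch; it uses only utilities already imported in this file):

    // floatToSortableInt() preserves ordering, so comparing the encoded ints
    // is equivalent to comparing the original floats.
    int a = NumericUtils.floatToSortableInt(1.5f);
    int b = NumericUtils.floatToSortableInt(FloatPoint.nextUp(1.5f));
    assert a < b;
    // Hence "x > 1.5f" can be evaluated as "encoded(x) >= floatToSortableInt(nextUp(1.5f))",
    // which is why getNextLow()/getNextHigh() exist and the RangeMatchDimFilter built in
    // getRangeMatchFilter() always carries inclusive bounds.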
+class DoubleFieldMapperNumeric extends NumericDecimalFieldMapper {
+    @Override
+    long convertToDocValues(Number parsedValue) {
+        return NumericUtils.doubleToSortableLong((Double) parsedValue);
+    }
+
+    @Override
+    Number getNextLow(Number parsedValue) {
+        return DoublePoint.nextDown((Double) parsedValue);
+    }
+
+    @Override
+    Number getNextHigh(Number parsedValue) {
+        return DoublePoint.nextUp((Double) parsedValue);
+    }
+}
+
+class KeywordFieldMapper implements DimensionFilterMapper {
+
+    @Override
+    public DimensionFilter getExactMatchFilter(MappedFieldType mappedFieldType, List<Object> rawValues) {
+        KeywordFieldType keywordFieldType = (KeywordFieldType) mappedFieldType;
+        List<Object> convertedValues = new ArrayList<>(rawValues.size());
+        for (Object rawValue : rawValues) {
+            convertedValues.add(parseRawKeyword(mappedFieldType.name(), rawValue, keywordFieldType));
+        }
+        return new ExactMatchDimFilter(mappedFieldType.name(), convertedValues);
+    }
+
+    @Override
+    public DimensionFilter getRangeMatchFilter(
+        MappedFieldType mappedFieldType,
+        Object rawLow,
+        Object rawHigh,
+        boolean includeLow,
+        boolean includeHigh
+    ) {
+        KeywordFieldType keywordFieldType = (KeywordFieldType) mappedFieldType;
+        return new RangeMatchDimFilter(
+            mappedFieldType.name(),
+            parseRawKeyword(mappedFieldType.name(), rawLow, keywordFieldType),
+            parseRawKeyword(mappedFieldType.name(), rawHigh, keywordFieldType),
+            includeLow,
+            includeHigh
+        );
+    }
+
+    @Override
+    public Optional<Long> getMatchingOrdinal(
+        String dimensionName,
+        Object value,
+        StarTreeValues starTreeValues,
+        DimensionFilter.MatchType matchType
+    ) {
+        SortedSetStarTreeValuesIterator sortedSetIterator = (SortedSetStarTreeValuesIterator) starTreeValues.getDimensionValuesIterator(
+            dimensionName
+        );
+        try {
+            if (matchType == DimensionFilter.MatchType.EXACT) {
+                long ordinal = sortedSetIterator.lookupTerm((BytesRef) value);
+                return ordinal >= 0 ? Optional.of(ordinal) : Optional.empty();
+            } else {
+                TermsEnum termsEnum = sortedSetIterator.termsEnum();
+                TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil((BytesRef) value);
+                // seekCeil either runs off the end (no term >= value) or lands on the smallest term >= value
+                // (FOUND if the exact term exists, NOT_FOUND otherwise).
+                switch (matchType) {
+                    case GTE:
+                        return seekStatus == TermsEnum.SeekStatus.END ? Optional.empty() : Optional.of(termsEnum.ord());
+                    case GT:
+                        return switch (seekStatus) {
+                            case END -> Optional.empty();
+                            case FOUND -> ((termsEnum.ord() + 1) < sortedSetIterator.getValueCount())
+                                ? Optional.of(termsEnum.ord() + 1)
+                                : Optional.empty();
+                            case NOT_FOUND -> Optional.of(termsEnum.ord());
+                        };
+                    case LTE:
+                        // If we landed on a term strictly greater than value, step back to the term just before it.
+                        if (seekStatus == TermsEnum.SeekStatus.NOT_FOUND) {
+                            return ((termsEnum.ord() - 1) >= 0) ? Optional.of(termsEnum.ord() - 1) : Optional.empty();
+                        } else {
+                            return Optional.of(termsEnum.ord());
+                        }
+                    case LT:
+                        // If seekCeil ran off the end, every existing term is < value, so the term the enum is
+                        // positioned on satisfies LT; otherwise check that the term just before is still in bounds.
+                        if (seekStatus == TermsEnum.SeekStatus.END) {
+                            return Optional.of(termsEnum.ord());
+                        } else {
+                            return ((termsEnum.ord() - 1) >= 0) ? Optional.of(termsEnum.ord() - 1) : Optional.empty();
+                        }
+                    default:
+                        throw new IllegalStateException("unexpected matchType " + matchType);
+                }
+            }
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    // TODO : Think around making TermBasedFT#indexedValueForSearch() accessor public for reuse here.
+ private Object parseRawKeyword(String field, Object rawValue, KeywordFieldType keywordFieldType) { + Object parsedValue = null; + if (rawValue != null) { + if (keywordFieldType.getTextSearchInfo().getSearchAnalyzer() == Lucene.KEYWORD_ANALYZER) { + parsedValue = BytesRefs.toBytesRef(rawValue); + } else { + if (rawValue instanceof BytesRef) { + rawValue = ((BytesRef) rawValue).utf8ToString(); + } + parsedValue = keywordFieldType.getTextSearchInfo().getSearchAnalyzer().normalize(field, rawValue.toString()); + } + } + return parsedValue; + } + +} diff --git a/server/src/main/java/org/opensearch/search/startree/filter/provider/StarTreeFilterProvider.java b/server/src/main/java/org/opensearch/search/startree/filter/provider/StarTreeFilterProvider.java new file mode 100644 index 0000000000000..61f44ba3f163e --- /dev/null +++ b/server/src/main/java/org/opensearch/search/startree/filter/provider/StarTreeFilterProvider.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.startree.filter.provider; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; +import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.filter.StarTreeFilter; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Converts a {@link QueryBuilder} into a {@link StarTreeFilter} by generating the appropriate @{@link org.opensearch.search.startree.filter.DimensionFilter} + * for the fields provided in the user query. + */ +@ExperimentalApi +public interface StarTreeFilterProvider { + + /** + * Returns the {@link StarTreeFilter} generated from the {@link QueryBuilder} + * @param context: + * @param rawFilter: + * @param compositeFieldType: + * @return : {@link StarTreeFilter} if the query shape is supported, else null. 
+ * @throws IOException : + */ + StarTreeFilter getFilter(SearchContext context, QueryBuilder rawFilter, CompositeDataCubeFieldType compositeFieldType) + throws IOException; + + StarTreeFilterProvider MATCH_ALL_PROVIDER = (context, rawFilter, compositeFieldType) -> new StarTreeFilter(Collections.emptyMap()); + + /** + * Singleton instances for most {@link StarTreeFilterProvider} + */ + class SingletonFactory { + + private static final Map QUERY_BUILDERS_TO_STF_PROVIDER = Map.of( + MatchAllQueryBuilder.NAME, + MATCH_ALL_PROVIDER, + TermQueryBuilder.NAME, + new TermStarTreeFilterProvider(), + TermsQueryBuilder.NAME, + new TermsStarTreeFilterProvider(), + RangeQueryBuilder.NAME, + new RangeStarTreeFilterProvider() + ); + + public static StarTreeFilterProvider getProvider(QueryBuilder query) { + if (query != null) { + return QUERY_BUILDERS_TO_STF_PROVIDER.get(query.getName()); + } + return MATCH_ALL_PROVIDER; + } + + } + + /** + * Converts @{@link TermQueryBuilder} into @{@link org.opensearch.search.startree.filter.ExactMatchDimFilter} + */ + class TermStarTreeFilterProvider implements StarTreeFilterProvider { + @Override + public StarTreeFilter getFilter(SearchContext context, QueryBuilder rawFilter, CompositeDataCubeFieldType compositeFieldType) + throws IOException { + TermQueryBuilder termQueryBuilder = (TermQueryBuilder) rawFilter; + String field = termQueryBuilder.fieldName(); + MappedFieldType mappedFieldType = context.mapperService().fieldType(field); + DimensionFilterMapper dimensionFilterMapper = mappedFieldType != null + ? DimensionFilterMapper.Factory.fromMappedFieldType(mappedFieldType) + : null; + Dimension matchedDimension = StarTreeQueryHelper.getMatchingDimensionOrNull(field, compositeFieldType.getDimensions()); + if (matchedDimension == null || mappedFieldType == null || dimensionFilterMapper == null) { + return null; // Indicates Aggregators to fallback to default implementation. + } else { + return new StarTreeFilter( + Map.of(field, List.of(dimensionFilterMapper.getExactMatchFilter(mappedFieldType, List.of(termQueryBuilder.value())))) + ); + } + } + } + + /** + * Converts @{@link TermsQueryBuilder} into @{@link org.opensearch.search.startree.filter.ExactMatchDimFilter} + */ + class TermsStarTreeFilterProvider implements StarTreeFilterProvider { + @Override + public StarTreeFilter getFilter(SearchContext context, QueryBuilder rawFilter, CompositeDataCubeFieldType compositeFieldType) + throws IOException { + TermsQueryBuilder termsQueryBuilder = (TermsQueryBuilder) rawFilter; + String field = termsQueryBuilder.fieldName(); + Dimension matchedDimension = StarTreeQueryHelper.getMatchingDimensionOrNull(field, compositeFieldType.getDimensions()); + MappedFieldType mappedFieldType = context.mapperService().fieldType(field); + DimensionFilterMapper dimensionFilterMapper = mappedFieldType != null + ? DimensionFilterMapper.Factory.fromMappedFieldType(mappedFieldType) + : null; + if (matchedDimension == null || mappedFieldType == null || dimensionFilterMapper == null) { + return null; // Indicates Aggregators to fallback to default implementation. 
+            } else {
+                return new StarTreeFilter(
+                    Map.of(field, List.of(dimensionFilterMapper.getExactMatchFilter(mappedFieldType, termsQueryBuilder.values())))
+                );
+            }
+        }
+    }
+
+    /**
+     * Converts @{@link RangeQueryBuilder} into @{@link org.opensearch.search.startree.filter.RangeMatchDimFilter}
+     */
+    class RangeStarTreeFilterProvider implements StarTreeFilterProvider {
+
+        @Override
+        public StarTreeFilter getFilter(SearchContext context, QueryBuilder rawFilter, CompositeDataCubeFieldType compositeFieldType)
+            throws IOException {
+            RangeQueryBuilder rangeQueryBuilder = (RangeQueryBuilder) rawFilter;
+            String field = rangeQueryBuilder.fieldName();
+            Dimension matchedDimension = StarTreeQueryHelper.getMatchingDimensionOrNull(field, compositeFieldType.getDimensions());
+            MappedFieldType mappedFieldType = context.mapperService().fieldType(field);
+            DimensionFilterMapper dimensionFilterMapper = mappedFieldType == null
+                ? null
+                : DimensionFilterMapper.Factory.fromMappedFieldType(mappedFieldType);
+            if (matchedDimension == null || mappedFieldType == null || dimensionFilterMapper == null) {
+                return null;
+            } else {
+                return new StarTreeFilter(
+                    Map.of(
+                        field,
+                        List.of(
+                            dimensionFilterMapper.getRangeMatchFilter(
+                                mappedFieldType,
+                                rangeQueryBuilder.from(),
+                                rangeQueryBuilder.to(),
+                                rangeQueryBuilder.includeLower(),
+                                rangeQueryBuilder.includeUpper()
+                            )
+                        )
+                    )
+                );
+            }
+        }
+
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/search/startree/filter/provider/package-info.java b/server/src/main/java/org/opensearch/search/startree/filter/provider/package-info.java
new file mode 100644
index 0000000000000..b6ff423e1f140
--- /dev/null
+++ b/server/src/main/java/org/opensearch/search/startree/filter/provider/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Star Tree Filter and Dimension Filter Providers */
+package org.opensearch.search.startree.filter.provider;
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java
new file mode 100644
index 0000000000000..4d95034d80bb7
--- /dev/null
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/fileformats/node/FixedLengthStarTreeNodeSearchTests.java
@@ -0,0 +1,338 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.compositeindex.datacube.startree.fileformats.node;
+
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.opensearch.index.compositeindex.datacube.startree.fileformats.StarTreeWriter;
+import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata;
+import org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode;
+import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeFactory;
+import org.opensearch.search.aggregations.startree.ArrayBasedCollector;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.TreeSet;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils.ALL;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class FixedLengthStarTreeNodeSearchTests extends OpenSearchTestCase {
+
+    public void testExactMatch() {
+        long[] randomSorted = random().longs(100, Long.MIN_VALUE, Long.MAX_VALUE).toArray();
+        Arrays.sort(randomSorted);
+        for (boolean createStarNode : new boolean[] { true, false }) {
+            for (boolean createNullNode : new boolean[] { true, false }) {
+                createStarTreeForDimension(new long[] { -1, 1, 2, 5 }, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> {
+                    try {
+                        boolean result = true;
+                        FixedLengthStarTreeNode lastMatchedNode;
+                        lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(-1L);
+                        result &= -1 == lastMatchedNode.getDimensionValue();
+                        // Leaf Node should return null
+                        result &= null == lastMatchedNode.getChildForDimensionValue(5L);
+                        result &= null == lastMatchedNode.getChildForDimensionValue(5L, lastMatchedNode);
+                        // Asserting Last Matched Node works as expected
+                        lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(1L, lastMatchedNode);
+                        result &= 1 == lastMatchedNode.getDimensionValue();
+                        lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(5L, lastMatchedNode);
+                        result &= 5 == lastMatchedNode.getDimensionValue();
+                        // Asserting null is returned when last matched node is after the value to search.
+                        lastMatchedNode = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(2L, lastMatchedNode);
+                        result &= null == lastMatchedNode;
+                        // When dimension value is null
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(null);
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(null, null);
+                        // non-existing dimensionValue
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(4L);
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(6, Long.MAX_VALUE));
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(Long.MIN_VALUE, -2));
+                        return result;
+                    } catch (IOException e) {
+                        throw new RuntimeException(e);
+                    }
+                }));
+                createStarTreeForDimension(new long[] { 1 }, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> {
+                    try {
+                        boolean result = true;
+                        result &= 1 == fixedLengthStarTreeNode.getChildForDimensionValue(1L).getDimensionValue();
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(2L);
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(2, Long.MAX_VALUE));
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(Long.MIN_VALUE, 0));
+                        return result;
+                    } catch (IOException e) {
+                        throw new RuntimeException(e);
+                    }
+                }));
+                createStarTreeForDimension(new long[] {}, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> {
+                    try {
+                        boolean result = true;
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(1L);
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(0, Long.MAX_VALUE));
+                        result &= null == fixedLengthStarTreeNode.getChildForDimensionValue(randomLongBetween(Long.MIN_VALUE, 0));
+                        return result;
+                    } catch (IOException e) {
+                        throw new RuntimeException(e);
+                    }
+                }));
+                createStarTreeForDimension(randomSorted, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> {
+                    boolean result = true;
+                    for (int i = 1; i <= 100; i++) {
+                        try {
+                            long key = randomLong();
+                            FixedLengthStarTreeNode node = (FixedLengthStarTreeNode) fixedLengthStarTreeNode.getChildForDimensionValue(key);
+                            long match = Arrays.binarySearch(randomSorted, key);
+                            if (match >= 0) {
+                                assertNotNull(node);
+                                assertEquals(key, node.getDimensionValue());
+                            } else {
+                                // No such dimension value exists, so no child node should be returned.
+                                assertNull(node);
+                            }
+                        } catch (IOException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                    return result;
+                }));
+            }
+        }
+    }
+
+    public void testRangeMatch() {
+        long[] randomSorted = random().longs(100, Long.MIN_VALUE, Long.MAX_VALUE).toArray();
+        Arrays.sort(randomSorted);
+        for (boolean createStarNode : new boolean[] { true, false }) {
+            for (boolean createNullNode : new boolean[] { true, false }) {
+                createStarTreeForDimension(
+                    new long[] { -10, -1, 1, 2, 5, 9, 25 },
+                    createStarNode,
+                    createNullNode,
+                    List.of(fixedLengthStarTreeNode -> {
+                        try {
+                            boolean result = true;
+                            ArrayBasedCollector collector;
+                            // Whole range
+                            collector = new ArrayBasedCollector();
+                            fixedLengthStarTreeNode.collectChildrenInRange(-20, 26, collector);
+                            result &= collector.matchAllCollectedValues(new long[] { -10, -1, 1, 2, 5, 9, 25 });
+                            // Subset matched from left
+                            collector = new ArrayBasedCollector();
+                            fixedLengthStarTreeNode.collectChildrenInRange(-2, 1, collector);
+                            result &= collector.matchAllCollectedValues(new long[] { -1, 1 });
+                            // Subset matched
from right + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(6, 100, collector); + result &= collector.matchAllCollectedValues(new long[] { 9, 25 }); + // No match on left + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(-30, -20, collector); + result &= collector.collectedNodeCount() == 0; + // No match on right + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(30, 50, collector); + result &= collector.collectedNodeCount() == 0; + // Low > High + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(50, 10, collector); + result &= collector.collectedNodeCount() == 0; + // Match leftmost + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(-30, -10, collector); + result &= collector.matchAllCollectedValues(new long[] { -10 }); + // Match rightmost + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(10, 25, collector); + result &= collector.matchAllCollectedValues(new long[] { 25 }); + // Match contains interval which has nothing + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(10, 24, collector); + result &= collector.collectedNodeCount() == 0; + // Match contains interval which has nothing + collector = new ArrayBasedCollector(); + fixedLengthStarTreeNode.collectChildrenInRange(6, 24, collector); + result &= collector.matchAllCollectedValues(new long[] { 9 }); + return result; + } catch (IOException e) { + throw new RuntimeException(e); + } + }) + ); + createStarTreeForDimension(randomSorted, createStarNode, createNullNode, List.of(fixedLengthStarTreeNode -> { + boolean result = true; + TreeSet treeSet = Arrays.stream(randomSorted).boxed().collect(Collectors.toCollection(TreeSet::new)); + for (int i = 1; i <= 100; i++) { + try { + ArrayBasedCollector collector = new ArrayBasedCollector(); + long low = randomLong(), high = randomLong(); + fixedLengthStarTreeNode.collectChildrenInRange(low, high, collector); + if (low < high) { + Long lowValue = treeSet.ceiling(low); + if (lowValue != null) { + Long highValue = treeSet.floor(high); + if (highValue != null && highValue >= lowValue) { + collector.matchAllCollectedValues( + Arrays.copyOfRange( + randomSorted, + Arrays.binarySearch(randomSorted, lowValue), + Arrays.binarySearch(randomSorted, highValue) + ) + ); + } else if (lowValue <= high) { + collector.matchAllCollectedValues(new long[] { lowValue }); + } else { + assertEquals(0, collector.collectedNodeCount()); + } + } else { + assertEquals(0, collector.collectedNodeCount()); + } + } else if (low == high) { + collector.matchAllCollectedValues(new long[] { low }); + } else { + assertEquals(0, collector.collectedNodeCount()); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return result; + })); + } + } + } + + private void createStarTreeForDimension( + long[] dimensionValues, + boolean createStarNode, + boolean createNullNode, + List> predicates + ) { + + try (Directory directory = newFSDirectory(createTempDir())) { + + long starTreeDataLength; + + try (IndexOutput dataOut = directory.createOutput("star-tree-data", IOContext.DEFAULT)) { + StarTreeWriter starTreeWriter = new StarTreeWriter(); + int starNodeLengthContribution = 0; + + InMemoryTreeNode rootNode = new InMemoryTreeNode( + 0, + randomInt(), + randomInt(), + randomFrom((byte) 0, (byte) -1, (byte) 1), + -1 + ); + 
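A note on the node-type bytes passed to InMemoryTreeNode in this fixture (inferred from how the children are wired up below): -1 marks a star node, 0 a default (dimension-value) node, and 1 a null node, consistent with the StarTreeNodeType.DEFAULT check in DimensionFilter.MATCH_ALL_DEFAULT earlier in this patch.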
rootNode.setChildDimensionId(1);
+                rootNode.setAggregatedDocId(randomInt());
+
+                if (createStarNode && dimensionValues.length > 1) {
+                    InMemoryTreeNode starChild = new InMemoryTreeNode(
+                        rootNode.getDimensionId() + 1,
+                        randomInt(),
+                        randomInt(),
+                        (byte) -1,
+                        -1
+                    );
+                    starChild.setChildDimensionId(-1);
+                    starChild.setAggregatedDocId(randomInt());
+                    rootNode.addChildNode(starChild, (long) ALL);
+                    starNodeLengthContribution++;
+                }
+
+                for (long dimensionValue : dimensionValues) {
+                    InMemoryTreeNode defaultNode = new InMemoryTreeNode(
+                        rootNode.getDimensionId() + 1,
+                        randomInt(),
+                        randomInt(),
+                        (byte) 0,
+                        dimensionValue
+                    );
+                    defaultNode.setChildDimensionId(-1);
+                    defaultNode.setAggregatedDocId(randomInt());
+                    rootNode.addChildNode(defaultNode, dimensionValue);
+                }
+
+                if (createNullNode) {
+                    InMemoryTreeNode nullNode = new InMemoryTreeNode(rootNode.getDimensionId() + 1, randomInt(), randomInt(), (byte) 1, -1);
+                    nullNode.setChildDimensionId(-1);
+                    nullNode.setAggregatedDocId(randomInt());
+                    rootNode.addChildNode(nullNode, null);
+                }
+
+                starTreeDataLength = starTreeWriter.writeStarTree(
+                    dataOut,
+                    rootNode,
+                    starNodeLengthContribution + rootNode.getChildren().size() + 1,
+                    "star-tree"
+                );
+
+                // asserting on the actual length of the star tree data file
+                assertEquals(starTreeDataLength, (33L * rootNode.getChildren().size()) + (starNodeLengthContribution * 33L) + 33L);
+            }
+
+            for (Predicate<FixedLengthStarTreeNode> predicate : predicates) {
+                try (IndexInput dataIn = directory.openInput("star-tree-data", IOContext.READONCE)) {
+                    StarTreeMetadata starTreeMetadata = mock(StarTreeMetadata.class);
+                    when(starTreeMetadata.getDataLength()).thenReturn(starTreeDataLength);
+                    when(starTreeMetadata.getDataStartFilePointer()).thenReturn(0L);
+                    FixedLengthStarTreeNode effectiveRoot = (FixedLengthStarTreeNode) StarTreeFactory.createStarTree(
+                        dataIn,
+                        starTreeMetadata
+                    );
+                    assertTrue(predicate.test(effectiveRoot));
+                }
+            }
+
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+
+    }
+
+}
diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
index 1beec828e849e..b548c844b2476 100644
--- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
+++ b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java
@@ -8,23 +8,43 @@
 package org.opensearch.search;
 
+import org.apache.lucene.util.FixedBitSet;
 import org.opensearch.action.OriginalIndices;
 import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.opensearch.action.search.SearchRequest;
 import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.Rounding;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.core.common.Strings;
 import org.opensearch.index.IndexService;
 import
org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.CompositeIndexSettings; +import org.opensearch.index.compositeindex.datacube.DateDimension; +import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; +import org.opensearch.index.engine.Segment; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; import org.opensearch.index.mapper.CompositeMappedFieldType; +import org.opensearch.index.mapper.DateFieldMapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; import org.opensearch.search.aggregations.AggregationBuilders; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; +import org.opensearch.search.aggregations.SearchContextAggregations; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.opensearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; @@ -32,6 +52,7 @@ import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.startree.DateHistogramAggregatorTests; import org.opensearch.search.aggregations.startree.StarTreeFilterTests; +import org.opensearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ReaderContext; @@ -41,7 +62,9 @@ import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.io.IOException; -import java.util.Map; +import java.util.Collections; +import java.util.List; +import java.util.Set; import static org.opensearch.search.aggregations.AggregationBuilders.dateHistogram; import static org.opensearch.search.aggregations.AggregationBuilders.max; @@ -49,6 +72,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.CoreMatchers.nullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests for validating query shapes which can be resolved using star-tree index @@ -76,7 +101,14 @@ public void testQueryParsingForMetricAggregations() throws IOException { .indices() .prepareCreate("test") .setSettings(settings) - .setMapping(StarTreeFilterTests.getExpandedMapping(1, false)); + .setMapping( + StarTreeFilterTests.getExpandedMapping( + 1, + false, + StarTreeFilterTests.DIMENSION_TYPE_MAP, + StarTreeFilterTests.METRIC_TYPE_MAP + ) + ); 
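Worth noting: getExpandedMapping now takes explicit dimension- and metric-type maps, presumably so the same fixture can declare keyword-typed star-tree dimensions alongside numeric ones and exercise the new OrdinalDimension / keyword DimensionFilterMapper paths introduced in this patch.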
createIndex("test", builder); IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -94,6 +126,14 @@ public void testQueryParsingForMetricAggregations() throws IOException { null ); + QueryBuilder baseQuery; + SearchContext searchContext = createSearchContext(indexService); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + // Case 1: No query or aggregations, should not use star tree SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); assertStarTreeContext(request, sourceBuilder, null, -1); @@ -102,14 +142,25 @@ public void testQueryParsingForMetricAggregations() throws IOException { sourceBuilder = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); assertStarTreeContext(request, sourceBuilder, null, -1); - // Case 3: MatchAllQuery and metric aggregations present, should use star tree - sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(max("test").field("field")); - CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( - "startree", - CompositeMappedFieldType.CompositeFieldType.STAR_TREE + // Case 3: MatchAllQuery and aggregations present, should use star tree + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(AggregationBuilders.max("test").field("field")); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree", + -1, + List.of(new NumericDimension(FIELD_NAME)), + List.of(new Metric("field", List.of(MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 ); - Map expectedQueryMap = null; - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); // Case 4: MatchAllQuery and metric aggregations present, but postFilter specified, should not use star tree sourceBuilder = new SearchSourceBuilder().size(0) @@ -118,24 +169,70 @@ public void testQueryParsingForMetricAggregations() throws IOException { .postFilter(new MatchAllQueryBuilder()); assertStarTreeContext(request, sourceBuilder, null, -1); - // Case 5: TermQuery and single metric aggregation, should use star tree, but not initialize query cache - sourceBuilder = new SearchSourceBuilder().size(0).query(new TermQueryBuilder("sndv", 1)).aggregation(max("test").field("field")); - expectedQueryMap = Map.of("sndv", 1L); - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + // Case 5: TermQuery and single aggregation, should use star tree, but not initialize query cache + baseQuery = new TermQueryBuilder("sndv", 1); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(AggregationBuilders.max("test").field("field")); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree", + -1, + List.of(new OrdinalDimension("sndv")), + List.of(new Metric("field", List.of(MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); - // Case 6: TermQuery and multiple metric aggregations present, should use star tree & initialize cache + // Case 6: TermQuery and multiple aggregations present, should use star tree & initialize cache + baseQuery = new TermQueryBuilder("sndv", 1); sourceBuilder = new 
SearchSourceBuilder().size(0) - .query(new TermQueryBuilder("sndv", 1)) - .aggregation(max("test").field("field")) + .query(baseQuery) + .aggregation(AggregationBuilders.max("test").field("field")) .aggregation(AggregationBuilders.sum("test2").field("field")); - expectedQueryMap = Map.of("sndv", 1L); - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, 0), 0); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree", + -1, + List.of(new OrdinalDimension("sndv")), + List.of(new Metric("field", List.of(MetricStat.MAX, MetricStat.SUM))), + baseQuery, + sourceBuilder, + true + ), + 0 + ); // Case 7: No query, metric aggregations present, should use star tree - sourceBuilder = new SearchSourceBuilder().size(0).aggregation(max("test").field("field")); - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, null, -1), -1); + sourceBuilder = new SearchSourceBuilder().size(0).aggregation(AggregationBuilders.max("test").field("field")); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree", + -1, + List.of(new OrdinalDimension("sndv")), + List.of(new Metric("field", List.of(MetricStat.MAX))), + null, + sourceBuilder, + true + ), + -1 + ); setStarTreeIndexSetting(null); + searchContext.close(); } /** @@ -177,49 +274,102 @@ public void testQueryParsingForDateHistogramAggregations() throws IOException { SumAggregationBuilder sumAggSub = sum("sum").field(FIELD_NAME).subAggregation(maxAggNoSub); MedianAbsoluteDeviationAggregationBuilder medianAgg = medianAbsoluteDeviation("median").field(FIELD_NAME); + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + + QueryBuilder baseQuery; + SearchContext searchContext = createSearchContext(indexService); // Case 1: No query or aggregations, should not use star tree SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); assertStarTreeContext(request, sourceBuilder, null, -1); // Case 2: MatchAllQuery present but no aggregations, should not use star tree - sourceBuilder = new SearchSourceBuilder().query(new MatchAllQueryBuilder()); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().query(baseQuery); assertStarTreeContext(request, sourceBuilder, null, -1); // Case 3: MatchAllQuery and non-nested metric aggregations is nested within date-histogram aggregation, should use star tree DateHistogramAggregationBuilder dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) .calendarInterval(DateHistogramInterval.DAY) .subAggregation(maxAggNoSub); - sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(dateHistogramAggregationBuilder); CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( "startree1", CompositeMappedFieldType.CompositeFieldType.STAR_TREE ); - Map expectedQueryMap = null; - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + 
starTreeFieldConfiguration, + "startree1", + -1, + List.of( + new DateDimension( + TIMESTAMP_FIELD, + List.of(new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH)), + DateFieldMapper.Resolution.MILLISECONDS + ), + new NumericDimension(FIELD_NAME) + ), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); // Case 4: MatchAllQuery and nested-metric aggregations is nested within date-histogram aggregation, should not use star tree dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) .calendarInterval(DateHistogramInterval.DAY) .subAggregation(sumAggSub); - sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(dateHistogramAggregationBuilder); assertStarTreeContext(request, sourceBuilder, null, -1); // Case 5: MatchAllQuery and non-startree supported aggregation nested within date-histogram aggregation, should not use star tree dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) .calendarInterval(DateHistogramInterval.DAY) .subAggregation(medianAgg); - sourceBuilder = new SearchSourceBuilder().size(0).query(new MatchAllQueryBuilder()).aggregation(dateHistogramAggregationBuilder); + baseQuery = new MatchAllQueryBuilder(); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(dateHistogramAggregationBuilder); assertStarTreeContext(request, sourceBuilder, null, -1); // Case 6: NumericTermQuery and date-histogram aggregation present, should use star tree dateHistogramAggregationBuilder = dateHistogram("by_day").field(TIMESTAMP_FIELD) .calendarInterval(DateHistogramInterval.DAY) .subAggregation(maxAggNoSub); - sourceBuilder = new SearchSourceBuilder().size(0) - .query(new TermQueryBuilder(FIELD_NAME, 1)) - .aggregation(dateHistogramAggregationBuilder); - expectedQueryMap = Map.of(FIELD_NAME, 1L); - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + baseQuery = new TermQueryBuilder(FIELD_NAME, 1L); + sourceBuilder = new SearchSourceBuilder().size(0).query(baseQuery).aggregation(dateHistogramAggregationBuilder); + assertStarTreeContext( + request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of( + new DateDimension( + TIMESTAMP_FIELD, + List.of(new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH)), + DateFieldMapper.Resolution.MILLISECONDS + ), + new NumericDimension(FIELD_NAME) + ), + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX))), + baseQuery, + sourceBuilder, + true + ), + -1 + ); // Case 7: Date histogram with non calendar interval: rounding is null for DateHistogramFactory - cannot use star-tree dateHistogramAggregationBuilder = dateHistogram("non_cal").field(TIMESTAMP_FIELD) @@ -245,13 +395,94 @@ public void testQueryParsingForDateHistogramAggregations() throws IOException { .calendarInterval(DateHistogramInterval.DAY) .subAggregation(maxAggNoSub) .subAggregation(sumAggNoSub); - expectedQueryMap = null; + baseQuery = null; sourceBuilder = new SearchSourceBuilder().size(0).aggregation(dateHistogramAggregationBuilder); - assertStarTreeContext(request, sourceBuilder, new StarTreeQueryContext(expectedStarTree, expectedQueryMap, -1), -1); + assertStarTreeContext( + 
request, + sourceBuilder, + getStarTreeQueryContext( + searchContext, + starTreeFieldConfiguration, + "startree1", + -1, + List.of( + new DateDimension( + TIMESTAMP_FIELD, + List.of(new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH)), + DateFieldMapper.Resolution.MILLISECONDS + ), + new NumericDimension(FIELD_NAME) + ), + List.of( + new Metric(TIMESTAMP_FIELD, List.of(MetricStat.SUM, MetricStat.MAX)), + new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.SUM)) + ), + baseQuery, + sourceBuilder, + true + ), + -1 + ); setStarTreeIndexSetting(null); } + public void testCacheCreationInStarTreeQueryContext() throws IOException { + StarTreeFieldConfiguration starTreeFieldConfiguration = new StarTreeFieldConfiguration( + 1, + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ); + CompositeDataCubeFieldType compositeDataCubeFieldType = new StarTreeMapper.StarTreeFieldType( + "star_tree", + new StarTreeField( + "star_tree", + List.of(new OrdinalDimension("field")), + List.of(new Metric("metricField", List.of(MetricStat.SUM, MetricStat.MAX))), + starTreeFieldConfiguration + ) + ); + + QueryBuilder baseQuery = new MatchAllQueryBuilder(); + SearchContext searchContext = mock(SearchContext.class); + MapperService mapperService = mock(MapperService.class); + IndexShard indexShard = mock(IndexShard.class); + Segment segment = mock(Segment.class); + SearchContextAggregations searchContextAggregations = mock(SearchContextAggregations.class); + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + + when(mapperService.getCompositeFieldTypes()).thenReturn(Set.of(compositeDataCubeFieldType)); + when(searchContext.mapperService()).thenReturn(mapperService); + when(searchContext.indexShard()).thenReturn(indexShard); + when(indexShard.segments(false)).thenReturn(List.of(segment, segment)); + when(searchContext.aggregations()).thenReturn(searchContextAggregations); + when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { null, null }); + StarTreeQueryContext starTreeQueryContext = new StarTreeQueryContext(searchContext, baseQuery); + + assertEquals(2, starTreeQueryContext.getAllCachedValues().length); + + // Asserting null values are ignored + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + starTreeQueryContext = new StarTreeQueryContext(searchContext, baseQuery); + starTreeQueryContext.maybeSetCachedNodeIdsForSegment(-1, null); + assertNull(starTreeQueryContext.getAllCachedValues()); + assertNull(starTreeQueryContext.maybeGetCachedNodeIdsForSegment(0)); + + // Assert correct cached value is returned + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { null, null }); + starTreeQueryContext = new StarTreeQueryContext(searchContext, baseQuery); + FixedBitSet cachedValues = new FixedBitSet(22); + starTreeQueryContext.maybeSetCachedNodeIdsForSegment(0, cachedValues); + assertEquals(2, starTreeQueryContext.getAllCachedValues().length); + assertEquals(22, starTreeQueryContext.maybeGetCachedNodeIdsForSegment(0).length()); + + starTreeQueryContext = new StarTreeQueryContext(compositeDataCubeFieldType, new MatchAllQueryBuilder(), 2); + assertEquals(2, starTreeQueryContext.getAllCachedValues().length); + + mapperService.close(); + } + /** * Test query parsing for date histogram aggregations on star-tree index when @timestamp field does not exist */ @@ -268,7 +499,14 @@ public void 
testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep .indices() .prepareCreate("test") .setSettings(settings) - .setMapping(StarTreeFilterTests.getExpandedMapping(1, false)); + .setMapping( + StarTreeFilterTests.getExpandedMapping( + 1, + false, + StarTreeFilterTests.DIMENSION_TYPE_MAP, + StarTreeFilterTests.METRIC_TYPE_MAP + ) + ); createIndex("test", builder); IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -293,10 +531,6 @@ public void testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().size(0) .query(new MatchAllQueryBuilder()) .aggregation(dateHistogramAggregationBuilder); - CompositeIndexFieldInfo expectedStarTree = new CompositeIndexFieldInfo( - "startree1", - CompositeMappedFieldType.CompositeFieldType.STAR_TREE - ); assertStarTreeContext(request, sourceBuilder, null, -1); setStarTreeIndexSetting(null); @@ -320,22 +554,61 @@ private void assertStarTreeContext( SearchService searchService = getInstanceFromNode(SearchService.class); try (ReaderContext reader = searchService.createOrGetReaderContext(request, false)) { SearchContext context = searchService.createContext(reader, request, null, true); - StarTreeQueryContext actualContext = context.getStarTreeQueryContext(); + StarTreeQueryContext actualContext = context.getQueryShardContext().getStarTreeQueryContext(); if (expectedContext == null) { - assertThat(context.getStarTreeQueryContext(), nullValue()); + assertThat(context.getQueryShardContext().getStarTreeQueryContext(), nullValue()); } else { assertThat(actualContext, notNullValue()); assertEquals(expectedContext.getStarTree().getType(), actualContext.getStarTree().getType()); assertEquals(expectedContext.getStarTree().getField(), actualContext.getStarTree().getField()); - assertEquals(expectedContext.getQueryMap(), actualContext.getQueryMap()); + assertEquals( + expectedContext.getBaseQueryStarTreeFilter().getDimensions(), + actualContext.getBaseQueryStarTreeFilter().getDimensions() + ); if (expectedCacheUsage > -1) { - assertEquals(expectedCacheUsage, actualContext.getStarTreeValues().length); + assertEquals(expectedCacheUsage, actualContext.getAllCachedValues().length); } else { - assertNull(actualContext.getStarTreeValues()); + assertNull(actualContext.getAllCachedValues()); } } searchService.doStop(); } } + + private StarTreeQueryContext getStarTreeQueryContext( + SearchContext searchContext, + StarTreeFieldConfiguration starTreeFieldConfiguration, + String compositeFieldName, + int cacheSize, + List dimensions, + List metrics, + QueryBuilder baseQuery, + SearchSourceBuilder sourceBuilder, + boolean assertConsolidation + ) { + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + AggregatorFactory[] aggregatorFactoriesArray = sourceBuilder.aggregations().getAggregatorFactories().stream().map(af -> { + try { + return ((ValuesSourceAggregationBuilder) af).build(searchContext.getQueryShardContext(), null); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).toArray(AggregatorFactory[]::new); + when(aggregatorFactories.getFactories()).thenReturn(aggregatorFactoriesArray); + SearchContextAggregations mockAggregations = mock(SearchContextAggregations.class); + when(mockAggregations.factories()).thenReturn(aggregatorFactories); + searchContext.aggregations(mockAggregations); + CompositeDataCubeFieldType compositeDataCubeFieldType = new StarTreeMapper.StarTreeFieldType( + compositeFieldName, + new 
StarTreeField(compositeFieldName, dimensions, metrics, starTreeFieldConfiguration) + ); + StarTreeQueryContext starTreeQueryContext = new StarTreeQueryContext(compositeDataCubeFieldType, baseQuery, cacheSize); + boolean consolidated = starTreeQueryContext.consolidateAllFilters(searchContext); + if (assertConsolidation) { + assertTrue(consolidated); + searchContext.getQueryShardContext().setStarTreeQueryContext(starTreeQueryContext); + } + return starTreeQueryContext; + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/ArrayBasedCollector.java b/server/src/test/java/org/opensearch/search/aggregations/startree/ArrayBasedCollector.java new file mode 100644 index 0000000000000..6aad0fbaafd2c --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/ArrayBasedCollector.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.startree; + +import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.search.startree.StarTreeNodeCollector; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class ArrayBasedCollector implements StarTreeNodeCollector { + + private final Set nodeDimensionValues = new HashSet<>(); + + @Override + public void collectStarTreeNode(StarTreeNode node) { + try { + nodeDimensionValues.add(node.getDimensionValue()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public boolean matchAllCollectedValues(long... values) throws IOException { + for (long value : values) { + if (!nodeDimensionValues.contains(value)) return false; + } + return true; + } + + public int collectedNodeCount() { + return nodeDimensionValues.size(); + } + +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java index 564a86deff1af..a374e2f5653b9 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/DateHistogramAggregatorTests.java @@ -55,7 +55,7 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.LinkedList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Random; @@ -155,10 +155,16 @@ public void testStarTreeDateHistogram() throws IOException { count("_name").field(FIELD_NAME), avg("_name").field(FIELD_NAME) }; - List supportedDimensions = new LinkedList<>(); - supportedDimensions.add(new NumericDimension(STATUS)); - supportedDimensions.add(new NumericDimension(SIZE)); - supportedDimensions.add( + LinkedHashMap supportedDimensions = new LinkedHashMap<>(); + supportedDimensions.put( + new NumericDimension(STATUS), + new NumberFieldMapper.NumberFieldType(STATUS, NumberFieldMapper.NumberType.INTEGER) + ); + supportedDimensions.put( + new NumericDimension(SIZE), + new NumberFieldMapper.NumberFieldType(STATUS, NumberFieldMapper.NumberType.INTEGER) + ); + supportedDimensions.put( new DateDimension( TIMESTAMP_FIELD, List.of( @@ -166,7 +172,8 @@ public void testStarTreeDateHistogram() throws IOException { new DateTimeUnitAdapter(Rounding.DateTimeUnit.DAY_OF_MONTH) ), 
DateFieldMapper.Resolution.MILLISECONDS - ) + ), + new DateFieldMapper.DateFieldType(TIMESTAMP_FIELD) ); for (ValuesSourceAggregationBuilder aggregationBuilder : agggBuilders) { @@ -224,7 +231,7 @@ private void testCase( QueryBuilder queryBuilder, DateHistogramAggregationBuilder dateHistogramAggregationBuilder, CompositeIndexFieldInfo starTree, - List supportedDimensions + LinkedHashMap supportedDimensions ) throws IOException { InternalDateHistogram starTreeAggregation = searchAndReduceStarTree( createIndexSettings(), diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/DimensionFilterAndMapperTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/DimensionFilterAndMapperTests.java new file mode 100644 index 0000000000000..e89bc8e60e9da --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/DimensionFilterAndMapperTests.java @@ -0,0 +1,193 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations.startree; + +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.util.BytesRef; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeField; +import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; +import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedSetStarTreeValuesIterator; +import org.opensearch.index.mapper.CompositeDataCubeFieldType; +import org.opensearch.index.mapper.KeywordFieldMapper; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.StarTreeMapper; +import org.opensearch.index.mapper.WildcardFieldMapper; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.query.TermsQueryBuilder; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.filter.DimensionFilter; +import org.opensearch.search.startree.filter.DimensionFilter.MatchType; +import org.opensearch.search.startree.filter.MatchNoneFilter; +import org.opensearch.search.startree.filter.provider.DimensionFilterMapper; +import org.opensearch.search.startree.filter.provider.StarTreeFilterProvider; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Optional; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class DimensionFilterAndMapperTests extends OpenSearchTestCase { + + public void testKeywordOrdinalMapping() throws IOException { + DimensionFilterMapper dimensionFilterMapper = DimensionFilterMapper.Factory.fromMappedFieldType( + new KeywordFieldMapper.KeywordFieldType("keyword") + ); + StarTreeValues starTreeValues = mock(StarTreeValues.class); + SortedSetStarTreeValuesIterator sortedSetStarTreeValuesIterator = mock(SortedSetStarTreeValuesIterator.class); + TermsEnum termsEnum = mock(TermsEnum.class); + 
when(sortedSetStarTreeValuesIterator.termsEnum()).thenReturn(termsEnum); + when(starTreeValues.getDimensionValuesIterator("field")).thenReturn(sortedSetStarTreeValuesIterator); + Optional matchingOrdinal; + + // Case Exact Match and found + BytesRef bytesRef = new BytesRef(new byte[] { 17, 29 }); + when(sortedSetStarTreeValuesIterator.lookupTerm(bytesRef)).thenReturn(1L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.EXACT); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(1, (long) matchingOrdinal.get()); + + // Case Exact Match and not found + when(sortedSetStarTreeValuesIterator.lookupTerm(bytesRef)).thenReturn(-10L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.EXACT); + assertFalse(matchingOrdinal.isPresent()); + + // Case GTE -> FOUND and NOT_FOUND + for (TermsEnum.SeekStatus seekStatus : new TermsEnum.SeekStatus[] { TermsEnum.SeekStatus.FOUND, TermsEnum.SeekStatus.NOT_FOUND }) { + when(termsEnum.seekCeil(bytesRef)).thenReturn(seekStatus); + when(termsEnum.ord()).thenReturn(10L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.GTE); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(10L, (long) matchingOrdinal.get()); + } + + // Seek Status END is same for GTE, GT + for (MatchType matchType : new MatchType[] { MatchType.GT, MatchType.GTE }) { + when(termsEnum.seekCeil(bytesRef)).thenReturn(TermsEnum.SeekStatus.END); + when(termsEnum.ord()).thenReturn(10L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, matchType); + assertFalse(matchingOrdinal.isPresent()); + } + + // Case GT -> FOUND and matched + when(termsEnum.seekCeil(bytesRef)).thenReturn(TermsEnum.SeekStatus.FOUND); + when(sortedSetStarTreeValuesIterator.getValueCount()).thenReturn(2L); + when(termsEnum.ord()).thenReturn(0L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.GT); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(1L, (long) matchingOrdinal.get()); + // Case GT -> FOUND and unmatched + when(termsEnum.ord()).thenReturn(3L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.GT); + assertFalse(matchingOrdinal.isPresent()); + + // Case GT -> NOT_FOUND + when(termsEnum.seekCeil(bytesRef)).thenReturn(TermsEnum.SeekStatus.NOT_FOUND); + when(termsEnum.ord()).thenReturn(10L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, MatchType.GT); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(10L, (long) matchingOrdinal.get()); + + // Seek Status END is same for LTE, LT + for (MatchType matchType : new MatchType[] { MatchType.LT, MatchType.LTE }) { + when(termsEnum.seekCeil(bytesRef)).thenReturn(TermsEnum.SeekStatus.END); + when(termsEnum.ord()).thenReturn(10L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, matchType); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(10L, (long) matchingOrdinal.get()); + } + + // Seek Status NOT_FOUND is same for LTE, LT + for (MatchType matchType : new MatchType[] { MatchType.LT, MatchType.LTE }) { + when(termsEnum.seekCeil(bytesRef)).thenReturn(TermsEnum.SeekStatus.NOT_FOUND); + when(sortedSetStarTreeValuesIterator.getValueCount()).thenReturn(2L); + when(termsEnum.ord()).thenReturn(1L); + matchingOrdinal = 
dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, matchType); + assertTrue(matchingOrdinal.isPresent()); + assertEquals(0L, (long) matchingOrdinal.get()); + // Case unmatched + when(termsEnum.ord()).thenReturn(0L); + matchingOrdinal = dimensionFilterMapper.getMatchingOrdinal("field", bytesRef, starTreeValues, matchType); + assertFalse(matchingOrdinal.isPresent()); + } + } + + public void testStarTreeFilterProviders() throws IOException { + CompositeDataCubeFieldType compositeDataCubeFieldType = new StarTreeMapper.StarTreeFieldType( + "star_tree", + new StarTreeField( + "star_tree", + List.of(new OrdinalDimension("keyword")), + List.of(new Metric("field", List.of(MetricStat.MAX))), + new StarTreeFieldConfiguration( + randomIntBetween(1, 10_000), + Collections.emptySet(), + StarTreeFieldConfiguration.StarTreeBuildMode.ON_HEAP + ) + ) + ); + MapperService mapperService = mock(MapperService.class); + SearchContext searchContext = mock(SearchContext.class); + when(searchContext.mapperService()).thenReturn(mapperService); + + // Null returned when mapper doesn't exist + assertNull(DimensionFilterMapper.Factory.fromMappedFieldType(new WildcardFieldMapper.WildcardFieldType("field"))); + + // Null returned for no mapped field type + assertNull(DimensionFilterMapper.Factory.fromMappedFieldType(null)); + + // Provider for null Query builder + assertEquals(StarTreeFilterProvider.MATCH_ALL_PROVIDER, StarTreeFilterProvider.SingletonFactory.getProvider(null)); + + QueryBuilder[] queryBuilders = new QueryBuilder[] { + new TermQueryBuilder("field", "value"), + new TermsQueryBuilder("field", List.of("value")), + new RangeQueryBuilder("field") }; + + for (QueryBuilder queryBuilder : queryBuilders) { + // Dimension Not Found + StarTreeFilterProvider provider = StarTreeFilterProvider.SingletonFactory.getProvider(queryBuilder); + assertNull(provider.getFilter(searchContext, queryBuilder, compositeDataCubeFieldType)); + } + + queryBuilders = new QueryBuilder[] { + new TermQueryBuilder("keyword", "value"), + new TermsQueryBuilder("keyword", List.of("value")), + new RangeQueryBuilder("keyword") }; + + for (QueryBuilder queryBuilder : queryBuilders) { + // Mapped field type not supported + StarTreeFilterProvider provider = StarTreeFilterProvider.SingletonFactory.getProvider(queryBuilder); + when(mapperService.fieldType("keyword")).thenReturn(new WildcardFieldMapper.WildcardFieldType("keyword")); + assertNull(provider.getFilter(searchContext, queryBuilder, compositeDataCubeFieldType)); + + // Unsupported Mapped Type + when(mapperService.fieldType("keyword")).thenReturn(null); + assertNull(provider.getFilter(searchContext, queryBuilder, compositeDataCubeFieldType)); + } + + // Testing MatchNoneFilter + DimensionFilter dimensionFilter = new MatchNoneFilter(); + dimensionFilter.initialiseForSegment(null, null); + ArrayBasedCollector collector = new ArrayBasedCollector(); + assertFalse(dimensionFilter.matchDimValue(1, null)); + dimensionFilter.matchStarTreeNodes(null, null, collector); + assertEquals(0, collector.collectedNodeCount()); + } + +} diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 21a4155a53ee5..6e10562c3a846 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -15,13 +15,20 @@ 
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene101.Lucene101Codec;
 import org.apache.lucene.document.Document;
+import org.apache.lucene.document.DoubleField;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.FloatField;
+import org.apache.lucene.document.IntField;
+import org.apache.lucene.document.KeywordField;
+import org.apache.lucene.document.LongField;
 import org.apache.lucene.document.SortedNumericDocValuesField;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.SegmentReader;
+import org.apache.lucene.sandbox.document.HalfFloatPoint;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.tests.index.RandomIndexWriter;
@@ -40,12 +47,16 @@
 import org.opensearch.index.compositeindex.datacube.Metric;
 import org.opensearch.index.compositeindex.datacube.MetricStat;
 import org.opensearch.index.compositeindex.datacube.NumericDimension;
+import org.opensearch.index.compositeindex.datacube.OrdinalDimension;
+import org.opensearch.index.mapper.KeywordFieldMapper;
 import org.opensearch.index.mapper.MappedFieldType;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.mapper.NumberFieldMapper;
 import org.opensearch.index.query.QueryBuilder;
 import org.opensearch.index.query.QueryShardContext;
+import org.opensearch.index.query.RangeQueryBuilder;
 import org.opensearch.index.query.TermQueryBuilder;
+import org.opensearch.index.query.TermsQueryBuilder;
 import org.opensearch.search.aggregations.AggregationBuilder;
 import org.opensearch.search.aggregations.AggregatorFactories;
 import org.opensearch.search.aggregations.AggregatorFactory;
@@ -69,11 +80,16 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
-import java.util.LinkedList;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Locale;
+import java.util.Map;
 import java.util.Random;
 import java.util.function.BiConsumer;
 import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 import static org.opensearch.search.aggregations.AggregationBuilders.avg;
 import static org.opensearch.search.aggregations.AggregationBuilders.count;
@@ -100,11 +116,17 @@ public void teardown() throws IOException {
         FeatureFlags.initializeFeatureFlags(Settings.EMPTY);
     }
 
-    protected Codec getCodec() {
+    protected Codec getCodec(
+        Supplier<Integer> maxLeafDocsSupplier,
+        LinkedHashMap<String, String> dimensionAndType,
+        Map<String, String> metricFieldAndType
+    ) {
         final Logger testLogger = LogManager.getLogger(MetricAggregatorTests.class);
         MapperService mapperService;
         try {
-            mapperService = StarTreeDocValuesFormatTests.createMapperService(StarTreeFilterTests.getExpandedMapping(1, false));
+            mapperService = StarTreeDocValuesFormatTests.createMapperService(
+                StarTreeFilterTests.getExpandedMapping(maxLeafDocsSupplier.get(), false, dimensionAndType, metricFieldAndType)
+            );
         } catch (IOException e) {
             throw new RuntimeException(e);
         }
@@ -112,36 +134,65 @@ protected Codec getCodec() {
     }
 
     public void testStarTreeDocValues() throws IOException {
+        final List<Supplier<Integer>> MAX_LEAF_DOC_VARIATIONS = List.of(
+            () -> 1,
+            () -> randomIntBetween(2, 100),
+            () -> randomIntBetween(101, 10_000)
+        );
+        final List<DimensionFieldData> dimensionFieldDatum = List.of(
+            new DimensionFieldData("sndv", () -> random().nextInt(10) - 5, DimensionTypes.INTEGER),
+            new DimensionFieldData("dv", () -> random().nextInt(20) - 10, DimensionTypes.INTEGER),
+            new DimensionFieldData("keyword_field", () -> random().nextInt(50), DimensionTypes.KEYWORD),
+            new DimensionFieldData("long_field", () -> random().nextInt(50), DimensionTypes.LONG),
+            new DimensionFieldData("half_float_field", () -> random().nextFloat(50), DimensionTypes.HALF_FLOAT),
+            new DimensionFieldData("float_field", () -> random().nextFloat(50), DimensionTypes.FLOAT),
+            new DimensionFieldData("double_field", () -> random().nextDouble(50), DimensionTypes.DOUBLE)
+        );
+        for (Supplier<Integer> maxLeafDocsSupplier : MAX_LEAF_DOC_VARIATIONS) {
+            testStarTreeDocValuesInternal(
+                getCodec(
+                    maxLeafDocsSupplier,
+                    dimensionFieldDatum.stream()
+                        .collect(
+                            Collectors.toMap(
+                                df -> df.getDimension().getField(),
+                                DimensionFieldData::getFieldType,
+                                (v1, v2) -> v1,
+                                LinkedHashMap::new
+                            )
+                        ),
+                    StarTreeFilterTests.METRIC_TYPE_MAP
+                ),
+                dimensionFieldDatum
+            );
+        }
+    }
+
+    private void testStarTreeDocValuesInternal(Codec codec, List<DimensionFieldData> dimensionFieldData) throws IOException {
         Directory directory = newDirectory();
         IndexWriterConfig conf = newIndexWriterConfig(null);
-        conf.setCodec(getCodec());
+        conf.setCodec(codec);
         conf.setMergePolicy(newLogMergePolicy());
         RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf);
         Random random = RandomizedTest.getRandom();
         int totalDocs = 100;
-        final String SNDV = "sndv";
-        final String DV = "dv";
         int val;
-        List<Document> docs = new ArrayList<>();
         // Index 100 random documents
         for (int i = 0; i < totalDocs; i++) {
             Document doc = new Document();
-            if (random.nextBoolean()) {
-                val = random.nextInt(10) - 5; // Random long between -5 and 4
-                doc.add(new SortedNumericDocValuesField(SNDV, val));
-            }
-            if (random.nextBoolean()) {
-                val = random.nextInt(20) - 10; // Random long between -10 and 9
-                doc.add(new SortedNumericDocValuesField(DV, val));
+            for (DimensionFieldData fieldData : dimensionFieldData) {
+                // FIXME: Reduce the frequency of null dimension values, e.g. guarantee a non-null value at least every 1-2 documents.
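+                // Editor's note (illustrative sketch, not part of the original patch): one way to
+                // address the FIXME above is to force a value periodically, e.g.
+                //   if (i % 2 == 0 || random.nextBoolean()) { doc.add(fieldData.getField()); }
+                // so that at least every other document carries each dimension.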
+ if (random.nextBoolean()) { + doc.add(fieldData.getField()); + } } if (random.nextBoolean()) { val = random.nextInt(50); // Random long between 0 and 49 doc.add(new SortedNumericDocValuesField(FIELD_NAME, val)); } iw.addDocument(doc); - docs.add(doc); } if (randomBoolean()) { @@ -157,6 +208,23 @@ public void testStarTreeDocValues() throws IOException { IndexSearcher indexSearcher = newSearcher(reader, false, false); CompositeIndexReader starTreeDocValuesReader = (CompositeIndexReader) reader.getDocValuesReader(); + MapperService mapperService = mapperServiceMock(); + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + for (DimensionFieldData fieldData : dimensionFieldData) { + when(mapperService.fieldType(fieldData.fieldName)).thenReturn(fieldData.getMappedField()); + } + QueryShardContext queryShardContext = queryShardContextMock( + indexSearcher, + mapperService, + createIndexSettings(), + circuitBreakerService, + new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() + ); + for (DimensionFieldData fieldData : dimensionFieldData) { + when(mapperService.fieldType(fieldData.fieldName)).thenReturn(fieldData.getMappedField()); + when(queryShardContext.fieldMapper(fieldData.fieldName)).thenReturn(fieldData.getMappedField()); + } + List compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields(); CompositeIndexFieldInfo starTree = compositeIndexFields.get(0); @@ -166,131 +234,71 @@ public void testStarTreeDocValues() throws IOException { ValueCountAggregationBuilder valueCountAggregationBuilder = count("_name").field(FIELD_NAME); AvgAggregationBuilder avgAggregationBuilder = avg("_name").field(FIELD_NAME); - List supportedDimensions = new LinkedList<>(); - supportedDimensions.add(new NumericDimension(SNDV)); - supportedDimensions.add(new NumericDimension(DV)); - - Query query = new MatchAllDocsQuery(); - // match-all query - QueryBuilder queryBuilder = null; // no predicates - testCase( - indexSearcher, - query, - queryBuilder, - sumAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalSum::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - maxAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalMax::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - minAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalMin::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - valueCountAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalValueCount::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - avgAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalAvg::getValue) - ); + LinkedHashMap supportedDimensions = dimensionFieldData.stream() + .collect( + Collectors.toMap(DimensionFieldData::getDimension, DimensionFieldData::getMappedField, (v1, v2) -> v1, LinkedHashMap::new) + ); - // Numeric-terms query - for (int cases = 0; cases < 100; cases++) { - String queryField; - long queryValue; - if (randomBoolean()) { - queryField = SNDV; - queryValue = random.nextInt(10); - } else { - queryField = DV; - queryValue = random.nextInt(20) - 15; + Query query = null; + QueryBuilder queryBuilder = null; + + for (int cases = 0; cases < 15; cases++) { + // Get all types of queries (Term/Terms/Range) for all the given dimensions. 
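+                // Editor's note (added context, not part of the original patch): per DimensionFilterAndMapperTests
+                // above, term/terms queries are expected to resolve to an ExactMatchDimFilter and range queries to a
+                // RangeMatchDimFilter via StarTreeFilterProvider, so this loop exercises each provider for every
+                // supported dimension type.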
+ List allFieldQueries = dimensionFieldData.stream() + .flatMap(x -> Stream.of(x.getTermQueryBuilder(), x.getTermsQueryBuilder(), x.getRangeQueryBuilder())) + .toList(); + + for (QueryBuilder qb : allFieldQueries) { + query = qb.toQuery(queryShardContext); + queryBuilder = qb; + testCase( + indexSearcher, + query, + qb, + sumAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalSum::getValue) + ); + testCase( + indexSearcher, + query, + qb, + maxAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMax::getValue) + ); + testCase( + indexSearcher, + query, + qb, + minAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalMin::getValue) + ); + testCase( + indexSearcher, + query, + qb, + valueCountAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalValueCount::getValue) + ); + testCase( + indexSearcher, + query, + qb, + avgAggregationBuilder, + starTree, + supportedDimensions, + verifyAggregation(InternalAvg::getValue) + ); } - - query = SortedNumericDocValuesField.newSlowExactQuery(queryField, queryValue); - queryBuilder = new TermQueryBuilder(queryField, queryValue); - - testCase( - indexSearcher, - query, - queryBuilder, - sumAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalSum::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - maxAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalMax::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - minAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalMin::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - valueCountAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalValueCount::getValue) - ); - testCase( - indexSearcher, - query, - queryBuilder, - avgAggregationBuilder, - starTree, - supportedDimensions, - verifyAggregation(InternalAvg::getValue) - ); } - CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); - - QueryShardContext queryShardContext = queryShardContextMock( - indexSearcher, - mapperServiceMock(), - createIndexSettings(), - circuitBreakerService, - new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() - ); - MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class); when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY); when(aggregatorFactory.getField()).thenReturn(FIELD_NAME); @@ -385,6 +393,22 @@ public void testStarTreeDocValues() throws IOException { false ); + // Keyword Range query with missing Low Ordinal + RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("keyword_field"); + rangeQueryBuilder.from(Long.MAX_VALUE).includeLower(random().nextBoolean()); + testCase( + indexSearcher, + rangeQueryBuilder.toQuery(queryShardContext), + rangeQueryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + null, + true + ); + ir.close(); directory.close(); } @@ -403,7 +427,7 @@ private void testC QueryBuilder queryBuilder, T aggBuilder, CompositeIndexFieldInfo starTree, - List supportedDimensions, + LinkedHashMap supportedDimensions, BiConsumer verify ) throws IOException { testCase(searcher, query, queryBuilder, aggBuilder, starTree, 
supportedDimensions, Collections.emptyList(), verify, null, true); @@ -415,7 +439,8 @@ private void testC QueryBuilder queryBuilder, T aggBuilder, CompositeIndexFieldInfo starTree, - List supportedDimensions, + LinkedHashMap supportedDimensions, // FIXME : Merge with the same input that goes to generating the + // codec. List supportedMetrics, BiConsumer verify, AggregatorFactory aggregatorFactory, @@ -453,4 +478,164 @@ private void testC ); verify.accept(expectedAggregation, starTreeAggregation); } + + private interface DimensionFieldDataSupplier { + IndexableField getField(String fieldName, Supplier valueSupplier); + + MappedFieldType getMappedField(String fieldName); + + Dimension getDimension(String fieldName); + } + + private abstract static class NumericDimensionFieldDataSupplier implements DimensionFieldDataSupplier { + + @Override + public Dimension getDimension(String fieldName) { + return new NumericDimension(fieldName); + } + + @Override + public MappedFieldType getMappedField(String fieldName) { + return new NumberFieldMapper.NumberFieldType(fieldName, numberType()); + } + + abstract NumberFieldMapper.NumberType numberType(); + } + + private static class DimensionFieldData { + private final String fieldName; + private final Supplier valueSupplier; + private final DimensionFieldDataSupplier dimensionFieldDataSupplier; + private final String fieldType; + + DimensionFieldData(String fieldName, Supplier valueSupplier, DimensionTypes dimensionType) { + this.fieldName = fieldName; + this.valueSupplier = valueSupplier; + this.dimensionFieldDataSupplier = dimensionType.getFieldDataSupplier(); + this.fieldType = dimensionType.name().toLowerCase(Locale.ROOT); + } + + public Dimension getDimension() { + return dimensionFieldDataSupplier.getDimension(fieldName); + } + + public MappedFieldType getMappedField() { + return dimensionFieldDataSupplier.getMappedField(fieldName); + } + + public IndexableField getField() { + return dimensionFieldDataSupplier.getField(fieldName, valueSupplier); + } + + public QueryBuilder getTermQueryBuilder() { + return new TermQueryBuilder(fieldName, valueSupplier.get()); + } + + public QueryBuilder getTermsQueryBuilder() { + int limit = randomIntBetween(1, 20); + List values = new ArrayList<>(limit); + for (int i = 0; i < limit; i++) { + values.add(valueSupplier.get()); + } + return new TermsQueryBuilder(fieldName, values); + } + + public QueryBuilder getRangeQueryBuilder() { + return new RangeQueryBuilder(fieldName).from(valueSupplier.get()) + .to(valueSupplier.get()) + .includeLower(randomBoolean()) + .includeUpper(randomBoolean()); + } + + public String getFieldType() { + return fieldType; + } + } + + private enum DimensionTypes { + + INTEGER(new NumericDimensionFieldDataSupplier() { + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.INTEGER; + } + + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new IntField(fieldName, (Integer) valueSupplier.get(), Field.Store.YES); + } + }), + LONG(new NumericDimensionFieldDataSupplier() { + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.LONG; + } + + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new LongField(fieldName, (Integer) valueSupplier.get(), Field.Store.YES); + } + }), + HALF_FLOAT(new NumericDimensionFieldDataSupplier() { + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new 
SortedNumericDocValuesField(fieldName, HalfFloatPoint.halfFloatToSortableShort((Float) valueSupplier.get())); + } + + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.HALF_FLOAT; + } + }), + FLOAT(new NumericDimensionFieldDataSupplier() { + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new FloatField(fieldName, (Float) valueSupplier.get(), Field.Store.YES); + } + + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.FLOAT; + } + }), + DOUBLE(new NumericDimensionFieldDataSupplier() { + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new DoubleField(fieldName, (Double) valueSupplier.get(), Field.Store.YES); + } + + @Override + NumberFieldMapper.NumberType numberType() { + return NumberFieldMapper.NumberType.DOUBLE; + } + }), + KEYWORD(new DimensionFieldDataSupplier() { + @Override + public IndexableField getField(String fieldName, Supplier valueSupplier) { + return new KeywordField(fieldName, String.valueOf(valueSupplier.get()), Field.Store.YES); + } + + @Override + public MappedFieldType getMappedField(String fieldName) { + return new KeywordFieldMapper.KeywordFieldType(fieldName, Lucene.STANDARD_ANALYZER); + } + + @Override + public Dimension getDimension(String fieldName) { + return new OrdinalDimension(fieldName); + } + }); + + private final DimensionFieldDataSupplier dimensionFieldDataSupplier; + + DimensionTypes(DimensionFieldDataSupplier dimensionFieldDataSupplier) { + this.dimensionFieldDataSupplier = dimensionFieldDataSupplier; + } + + public DimensionFieldDataSupplier getFieldDataSupplier() { + return dimensionFieldDataSupplier; + } + + } + } diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index ef8e858e3efe1..7282b0fafb8aa 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -33,20 +33,29 @@ import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.startree.index.StarTreeValues; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.search.aggregations.AggregatorTestCase; -import org.opensearch.search.startree.StarTreeFilter; +import org.opensearch.search.internal.SearchContext; +import org.opensearch.search.startree.StarTreeQueryHelper; +import org.opensearch.search.startree.StarTreeTraversalUtil; +import org.opensearch.search.startree.filter.DimensionFilter; +import org.opensearch.search.startree.filter.ExactMatchDimFilter; +import org.opensearch.search.startree.filter.RangeMatchDimFilter; +import org.opensearch.search.startree.filter.StarTreeFilter; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import 
java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Set; + +import org.mockito.Mockito; import static org.opensearch.index.codec.composite912.datacube.startree.AbstractStarTreeDVFormatTests.topMapping; @@ -57,6 +66,16 @@ public class StarTreeFilterTests extends AggregatorTestCase { private static final String SDV = "sdv"; private static final String DV = "dv"; + public static final LinkedHashMap DIMENSION_TYPE_MAP = new LinkedHashMap<>(); + public static final Map METRIC_TYPE_MAP = Map.of(FIELD_NAME, "integer"); + + static { + // Ordered dimensions + DIMENSION_TYPE_MAP.put(SNDV, "integer"); + DIMENSION_TYPE_MAP.put(SDV, "integer"); + DIMENSION_TYPE_MAP.put(DV, "integer"); + } + @Before public void setup() { FeatureFlags.initializeFeatureFlags(Settings.builder().put(FeatureFlags.STAR_TREE_INDEX, true).build()); @@ -72,7 +91,7 @@ protected Codec getCodec(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimen MapperService mapperService; try { mapperService = StarTreeDocValuesFormatTests.createMapperService( - getExpandedMapping(maxLeafDoc, skipStarNodeCreationForSDVDimension) + getExpandedMapping(maxLeafDoc, skipStarNodeCreationForSDVDimension, DIMENSION_TYPE_MAP, METRIC_TYPE_MAP) ); } catch (IOException e) { throw new RuntimeException(e); @@ -88,6 +107,64 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti testStarTreeFilter(10, false); } + public void testStarTreeFilterMerging() { + + StarTreeFilter mergedStarTreeFilter; + String dimensionToMerge = "dim"; + + DimensionFilter exactMatchDimFilter = new ExactMatchDimFilter(dimensionToMerge, Collections.emptyList()); + DimensionFilter rangeMatchDimFilter = new RangeMatchDimFilter(dimensionToMerge, null, null, true, true); + + // When Star Tree doesn't have the same dimension as @dimensionToMerge + StarTreeFilter starTreeFilter = new StarTreeFilter(Collections.emptyMap()); + mergedStarTreeFilter = StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + starTreeFilter, + dimensionToMerge, + List.of(exactMatchDimFilter) + ); + assertEquals(1, mergedStarTreeFilter.getDimensions().size()); + DimensionFilter mergedDimensionFilter1 = mergedStarTreeFilter.getFiltersForDimension(dimensionToMerge).get(0); + assertEquals(ExactMatchDimFilter.class, mergedDimensionFilter1.getClass()); + + // When Star Tree has the same dimension as @dimensionToMerge + starTreeFilter = new StarTreeFilter(Map.of(dimensionToMerge, List.of(rangeMatchDimFilter))); + mergedStarTreeFilter = StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + starTreeFilter, + dimensionToMerge, + List.of(exactMatchDimFilter) + ); + assertEquals(1, mergedStarTreeFilter.getDimensions().size()); + DimensionFilter mergedDimensionFilter2 = mergedStarTreeFilter.getFiltersForDimension(dimensionToMerge).get(0); + assertEquals(RangeMatchDimFilter.class, mergedDimensionFilter2.getClass()); + + // When Star Tree has the same dimension as @dimensionToMerge with other dimensions + starTreeFilter = new StarTreeFilter(Map.of(dimensionToMerge, List.of(rangeMatchDimFilter), "status", List.of(rangeMatchDimFilter))); + mergedStarTreeFilter = StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + starTreeFilter, + dimensionToMerge, + List.of(exactMatchDimFilter) + ); + assertEquals(2, mergedStarTreeFilter.getDimensions().size()); + DimensionFilter mergedDimensionFilter3 = mergedStarTreeFilter.getFiltersForDimension(dimensionToMerge).get(0); + assertEquals(RangeMatchDimFilter.class, mergedDimensionFilter3.getClass()); + DimensionFilter 
mergedDimensionFilter4 = mergedStarTreeFilter.getFiltersForDimension("status").get(0); + assertEquals(RangeMatchDimFilter.class, mergedDimensionFilter4.getClass()); + + // When Star Tree doesn't have the same dimension as @dimensionToMerge but has other dimensions + starTreeFilter = new StarTreeFilter(Map.of("status", List.of(rangeMatchDimFilter))); + mergedStarTreeFilter = StarTreeQueryHelper.mergeDimensionFilterIfNotExists( + starTreeFilter, + dimensionToMerge, + List.of(exactMatchDimFilter) + ); + assertEquals(2, mergedStarTreeFilter.getDimensions().size()); + DimensionFilter mergedDimensionFilter5 = mergedStarTreeFilter.getFiltersForDimension(dimensionToMerge).get(0); + assertEquals(ExactMatchDimFilter.class, mergedDimensionFilter5.getClass()); + DimensionFilter mergedDimensionFilter6 = mergedStarTreeFilter.getFiltersForDimension("status").get(0); + assertEquals(RangeMatchDimFilter.class, mergedDimensionFilter6.getClass()); + + } + private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List docs) throws IOException { Directory directory = newDirectory(); @@ -129,69 +206,141 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS long starTreeDocCount, docCount; + MapperService mapperService = Mockito.mock(MapperService.class); + SearchContext searchContext = Mockito.mock(SearchContext.class); + + Mockito.when(searchContext.mapperService()).thenReturn(mapperService); + Mockito.when(mapperService.fieldType(SNDV)) + .thenReturn(new NumberFieldMapper.NumberFieldType(SNDV, NumberFieldMapper.NumberType.INTEGER)); + Mockito.when(mapperService.fieldType(DV)) + .thenReturn(new NumberFieldMapper.NumberFieldType(DV, NumberFieldMapper.NumberType.INTEGER)); + Mockito.when(mapperService.fieldType(SDV)) + .thenReturn(new NumberFieldMapper.NumberFieldType(SDV, NumberFieldMapper.NumberType.INTEGER)); + // assert that all documents are included if no filters are given - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Collections.emptyMap()), + context, + searchContext + ); docCount = getDocCount(docs, Map.of()); assertEquals(totalDocs, starTreeDocCount); assertEquals(docCount, starTreeDocCount); // single filter - matches docs - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(0L))))), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, 0L)); assertEquals(1, docCount); assertEquals(docCount, starTreeDocCount); // single filter on 3rd field in ordered dimension - matches docs - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(DV, 0L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(DV, List.of(new ExactMatchDimFilter(DV, List.of(0L))))), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(DV, 0L)); assertEquals(1, docCount); assertEquals(docCount, starTreeDocCount); // single filter - does not match docs - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 101L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(101L))))), + context, + 
searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, 101L)); assertEquals(0, docCount); assertEquals(docCount, starTreeDocCount); // single filter on 3rd field in ordered dimension - does not match docs - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(DV, -101L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(-101L))))), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, -101L)); assertEquals(0, docCount); assertEquals(docCount, starTreeDocCount); // multiple filters - matches docs - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, 0L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter( + Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(0L))), DV, List.of(new ExactMatchDimFilter(DV, List.of(0L)))) + ), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, 0L)); assertEquals(1, docCount); assertEquals(docCount, starTreeDocCount); // no document should match the filter - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, -11L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter( + Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(0L))), DV, List.of(new ExactMatchDimFilter(DV, List.of(-11L)))) + ), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, -11L)); assertEquals(0, docCount); assertEquals(docCount, starTreeDocCount); // Only the first filter should match some documents, second filter matches none - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SNDV, 0L, DV, -100L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter( + Map.of(SNDV, List.of(new ExactMatchDimFilter(SNDV, List.of(0L))), DV, List.of(new ExactMatchDimFilter(DV, List.of(-100L)))) + ), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SNDV, 0L, DV, -100L)); assertEquals(0, docCount); assertEquals(docCount, starTreeDocCount); // non-dimension fields in filter - should throw IllegalArgumentException expectThrows( - IllegalArgumentException.class, - () -> getDocCountFromStarTree(starTreeDocValuesReader, Map.of(FIELD_NAME, 0L), context) + IllegalStateException.class, + () -> getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(FIELD_NAME, List.of(new ExactMatchDimFilter(FIELD_NAME, List.of(0L))))), + context, + searchContext + ) ); if (skipStarNodeCreationForSDVDimension == true) { // Documents are not indexed - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SDV, 4L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(SDV, List.of(new ExactMatchDimFilter(SDV, List.of(4L))))), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SDV, 4L)); assertEquals(1, docCount); assertEquals(docCount, starTreeDocCount); } else { // Documents are indexed - starTreeDocCount = getDocCountFromStarTree(starTreeDocValuesReader, Map.of(SDV, 4L), context); + starTreeDocCount = getDocCountFromStarTree( + starTreeDocValuesReader, + new StarTreeFilter(Map.of(SDV, List.of(new ExactMatchDimFilter(SDV, List.of(4L))))), + context, + searchContext + ); docCount = getDocCount(docs, Map.of(SDV, 4L)); 
             assertEquals(0, docCount);
             assertEquals(docCount, starTreeDocCount);
@@ -225,12 +374,16 @@ private long getDocCount(List<Document> documents, Map<String, Long> filters) {
     }
 
     // Returns count of documents in the star tree having field SNDV & applied filters
-    private long getDocCountFromStarTree(CompositeIndexReader starTreeDocValuesReader, Map<String, Long> filters, LeafReaderContext context)
-        throws IOException {
+    private long getDocCountFromStarTree(
+        CompositeIndexReader starTreeDocValuesReader,
+        StarTreeFilter starTreeFilter,
+        LeafReaderContext context,
+        SearchContext searchContext
+    ) throws IOException {
         List<CompositeIndexFieldInfo> compositeIndexFields = starTreeDocValuesReader.getCompositeIndexFields();
         CompositeIndexFieldInfo starTree = compositeIndexFields.get(0);
         StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(context, starTree);
-        FixedBitSet filteredValues = StarTreeFilter.getStarTreeResult(starTreeValues, filters, Set.of());
+        FixedBitSet filteredValues = StarTreeTraversalUtil.getStarTreeResult(starTreeValues, starTreeFilter, searchContext);
 
         SortedNumericStarTreeValuesIterator valuesIterator = (SortedNumericStarTreeValuesIterator) starTreeValues.getMetricValuesIterator(
             StarTreeUtils.fullyQualifiedFieldNameForStarTreeMetricsDocValues(
@@ -262,7 +415,12 @@ private long getDocCountFromStarTree(CompositeIndexReader starTreeDocValuesReade
         return docCount;
     }
 
-    public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipStarNodeCreationForSDVDimension) throws IOException {
+    public static XContentBuilder getExpandedMapping(
+        int maxLeafDocs,
+        boolean skipStarNodeCreationForSDVDimension,
+        LinkedHashMap<String, String> dimensionNameAndType,
+        Map<String, String> metricFieldNameAndType
+    ) throws IOException {
         return topMapping(b -> {
             b.startObject("composite");
             b.startObject("startree");
@@ -274,16 +432,14 @@ public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipSt
                 b.value("sdv");
                 b.endArray();
             }
+            // FIXME: Change this to take the dimension order and the other inputs as method params.
+            // FIXME: Create default constants for the existing mapping so other callers can use it easily.
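+            // Editor's note (hypothetical sketch, not part of the original patch): the default-constants FIXME
+            // could be satisfied with a convenience overload such as
+            //   public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipSDV) throws IOException {
+            //       return getExpandedMapping(maxLeafDocs, skipSDV, DIMENSION_TYPE_MAP, METRIC_TYPE_MAP);
+            //   }
+            // which keeps existing callers compiling against the new signature.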
b.startArray("ordered_dimensions"); - b.startObject(); - b.field("name", "sndv"); - b.endObject(); - b.startObject(); - b.field("name", "sdv"); - b.endObject(); - b.startObject(); - b.field("name", "dv"); - b.endObject(); + for (String dimension : dimensionNameAndType.keySet()) { + b.startObject(); + b.field("name", dimension); + b.endObject(); + } b.endArray(); b.startArray("metrics"); b.startObject(); @@ -311,18 +467,16 @@ public static XContentBuilder getExpandedMapping(int maxLeafDocs, boolean skipSt b.endObject(); b.endObject(); b.startObject("properties"); - b.startObject("sndv"); - b.field("type", "integer"); - b.endObject(); - b.startObject("sdv"); - b.field("type", "integer"); - b.endObject(); - b.startObject("dv"); - b.field("type", "integer"); - b.endObject(); - b.startObject("field"); - b.field("type", "integer"); - b.endObject(); + for (String dimension : dimensionNameAndType.keySet()) { + b.startObject(dimension); + b.field("type", dimensionNameAndType.get(dimension)); + b.endObject(); + } + for (String metricField : metricFieldNameAndType.keySet()) { + b.startObject(metricField); + b.field("type", metricFieldNameAndType.get(metricField)); + b.endObject(); + } b.endObject(); }); } diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index b982665e01d8a..78e3d4f50a0d5 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -95,10 +95,8 @@ import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.Dimension; import org.opensearch.index.compositeindex.datacube.Metric; -import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; -import org.opensearch.index.fielddata.IndexFieldDataService; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.CompositeDataCubeFieldType; @@ -129,7 +127,6 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.indices.IndicesModule; -import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.SearchPlugin; import org.opensearch.script.ScriptService; @@ -143,7 +140,6 @@ import org.opensearch.search.aggregations.support.CoreValuesSourceType; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.search.aggregations.support.ValuesSourceType; -import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.FetchPhase; import org.opensearch.search.fetch.subphase.FetchDocValuesPhase; import org.opensearch.search.fetch.subphase.FetchSourcePhase; @@ -162,6 +158,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -349,7 +346,7 @@ protected CountingAggregator createCountingAggregator( IndexSearcher indexSearcher, IndexSettings indexSettings, CompositeIndexFieldInfo starTree, - List supportedDimensions, + LinkedHashMap 
supportedDimensions, List supportedMetrics, MultiBucketConsumer bucketConsumer, AggregatorFactory aggregatorFactory, @@ -394,7 +391,7 @@ protected SearchContext createSearchContextWithStarTreeContext( Query query, QueryBuilder queryBuilder, CompositeIndexFieldInfo starTree, - List supportedDimensions, + LinkedHashMap supportedDimensions, List supportedMetrics, MultiBucketConsumer bucketConsumer, AggregatorFactory aggregatorFactory, @@ -427,16 +424,23 @@ protected SearchContext createSearchContextWithStarTreeContext( when(compositeMappedFieldType.getCompositeIndexType()).thenReturn(starTree.getType()); Set compositeFieldTypes = Set.of(compositeMappedFieldType); - when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions); + when((compositeMappedFieldType).getDimensions()).thenReturn(new ArrayList<>(supportedDimensions.keySet())); when((compositeMappedFieldType).getMetrics()).thenReturn(supportedMetrics); MapperService mapperService = mock(MapperService.class); when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes); when(searchContext.mapperService()).thenReturn(mapperService); - SearchSourceBuilder sb = new SearchSourceBuilder().query(queryBuilder); - StarTreeQueryContext starTreeQueryContext = StarTreeQueryHelper.getStarTreeQueryContext(searchContext, sb); + for (Dimension dimension : supportedDimensions.keySet()) { + when(mapperService.fieldType(dimension.getField())).thenReturn(supportedDimensions.get(dimension)); + when(searchContext.getQueryShardContext().fieldMapper(dimension.getField())).thenReturn(supportedDimensions.get(dimension)); + } + + StarTreeQueryContext starTreeQueryContext = new StarTreeQueryContext(searchContext, queryBuilder); + boolean consolidated = starTreeQueryContext.consolidateAllFilters(searchContext); + if (consolidated) { + searchContext.getQueryShardContext().setStarTreeQueryContext(starTreeQueryContext); + } - when(searchContext.getStarTreeQueryContext()).thenReturn(starTreeQueryContext); return searchContext; } @@ -496,13 +500,6 @@ public boolean shouldCache(Query query) { when(mapperService.getIndexSettings()).thenReturn(indexSettings); when(mapperService.hasNested()).thenReturn(false); when(searchContext.mapperService()).thenReturn(mapperService); - IndexFieldDataService ifds = new IndexFieldDataService( - indexSettings, - new IndicesFieldDataCache(Settings.EMPTY, new IndexFieldDataCache.Listener() { - }), - circuitBreakerService, - mapperService - ); QueryShardContext queryShardContext = queryShardContextMock( contextIndexSearcher, mapperService, @@ -768,7 +765,7 @@ protected A searchAndReduc QueryBuilder queryBuilder, AggregationBuilder builder, CompositeIndexFieldInfo compositeIndexFieldInfo, - List supportedDimensions, + LinkedHashMap supportedDimensions, List supportedMetrics, int maxBucket, boolean hasNested, From 7306905fb8cfc64dfb16fd64a77b5d74c55204fc Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Wed, 29 Jan 2025 18:30:59 +0530 Subject: [PATCH 28/48] Throwing exception for any update call for Append only indices (#17177) Signed-off-by: RS146BIJAY --- .../index/engine/InternalEngine.java | 7 +++ .../index/engine/InternalEngineTests.java | 49 +++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 43279ac3b2281..47e8d53b22dc5 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ 
b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -1327,6 +1327,13 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele } private void updateDocs(final Term uid, final List docs, final IndexWriter indexWriter) throws IOException { + if (engineConfig.getIndexSettings().getIndexMetadata().isAppendOnlyIndex()) { + failEngine( + "Failing shard as update operation is not allowed for append only index ", + new EngineException(shardId, "Unable to update document as it is an append only index") + ); + } + if (docs.size() > 1) { indexWriter.softUpdateDocuments(uid, docs, softDeletesField); } else { diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index b49ea9af6d5c4..fa501b155e96b 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -1083,6 +1083,55 @@ public void testConcurrentGetAndFlush() throws Exception { latestGetResult.get().close(); } + public void testUpdateOperationForAppendOnlyIndex() throws Exception { + Settings.Builder settings = Settings.builder() + .put(defaultSettings.getSettings()) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), "true"); + final IndexMetadata indexMetadata = IndexMetadata.builder(defaultSettings.getIndexMetadata()).settings(settings).build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(indexMetadata); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + try ( + Store store = createStore(); + InternalEngine engine = createUpdateOnlyEngine( + config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get) + ) + ) { + engine.refresh("warm_up"); + Engine.Searcher searchResult = engine.acquireSearcher("test"); + searchResult.close(); + + final BiFunction searcherFactory = engine::acquireSearcher; + + // create a document + Document document = testDocumentWithTextField(); + document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); + ParsedDocument doc = testParsedDocument("1", null, document, B_1, null); + expectThrows(AlreadyClosedException.class, () -> engine.index(indexForDoc(doc))); + } + } + + private InternalEngine createUpdateOnlyEngine(EngineConfig config) throws IOException { + final Store store = config.getStore(); + final Directory directory = store.directory(); + if (Lucene.indexExists(directory) == false) { + store.createEmpty(config.getIndexSettings().getIndexVersionCreated().luceneVersion); + final String translogUuid = Translog.createEmptyTranslog( + config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + primaryTerm.get() + ); + store.associateIndexWithNewTranslog(translogUuid); + } + + return new InternalEngine(config) { + @Override + protected IndexingStrategy indexingStrategyForOperation(Index index) throws IOException { + return IndexingStrategy.processNormally(false, 0, 0); + } + }; + } + public void testSimpleOperations() throws Exception { engine.refresh("warm_up"); Engine.Searcher searchResult = engine.acquireSearcher("test"); From b2ee0e325f0fefad1c5b29eb93619b3af8b4d27b Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Wed, 29 Jan 2025 21:23:30 +0530 Subject: [PATCH 29/48] Enforcing append only setting for star-tree index (#17180) Signed-off-by: 
bharath-techie --- .../mapper/ScaledFloatFieldMapperTests.java | 2 ++ .../index/mapper/StarTreeMapperIT.java | 22 +++++++++++++++++++ .../opensearch/index/mapper/ObjectMapper.java | 9 ++++++++ .../TranslogFlushIntervalSettingsTests.java | 19 +++++++++++++--- .../AbstractStarTreeDVFormatTests.java | 1 + .../index/mapper/ObjectMapperTests.java | 2 ++ .../index/mapper/StarTreeMapperTests.java | 3 +++ .../search/SearchServiceStarTreeTests.java | 3 +++ 8 files changed, 58 insertions(+), 3 deletions(-) diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java index c3d62b088ced7..d1af54452bde9 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; @@ -132,6 +133,7 @@ public void testScaledFloatWithStarTree() throws Exception { protected Settings getIndexSettings() { return Settings.builder() .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(super.getIndexSettings()) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index e90665b14adbf..17791f57ca329 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -12,6 +12,7 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Rounding; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; @@ -56,6 +57,7 @@ public class StarTreeMapperIT extends OpenSearchIntegTestCase { private static final String TEST_INDEX = "test"; Settings settings = Settings.builder() .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); @@ -756,6 +758,7 @@ public void testValidCompositeIndexWithDuplicateDates() { public void testCompositeIndexWithIndexNotSpecified() { Settings settings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -767,9 +770,25 @@ public void testCompositeIndexWithIndexNotSpecified() { ); } + public void testAppendOnlyInCompositeIndexNotSpecified() { + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new 
ByteSizeValue(512, ByteSizeUnit.MB)) + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .build(); + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get() + ); + assertEquals( + "Failed to parse mapping [_doc]: Set 'index.append_only.enabled' to true in the index settings to use the star tree index", + ex.getMessage() + ); + } + public void testCompositeIndexWithHigherTranslogFlushSize() { Settings settings = Settings.builder() .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(513, ByteSizeUnit.MB)) .build(); IllegalArgumentException ex = expectThrows( @@ -1082,6 +1101,7 @@ public void testMaxDimsCompositeIndex() { Settings.builder() .put(StarTreeIndexSettings.STAR_TREE_MAX_DIMENSIONS_SETTING.getKey(), 2) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) ) .get() @@ -1101,6 +1121,7 @@ public void testMaxMetricsCompositeIndex() { Settings.builder() .put(StarTreeIndexSettings.STAR_TREE_MAX_BASE_METRICS_SETTING.getKey(), 4) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) ) .get() @@ -1119,6 +1140,7 @@ public void testMaxCalendarIntervalsCompositeIndex() { Settings.builder() .put(StarTreeIndexSettings.STAR_TREE_MAX_DATE_INTERVALS_SETTING.getKey(), 1) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) ) .get() diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index b93c82d7a5c7c..a1e63661c61d3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -462,6 +462,15 @@ protected static void parseCompositeField( ) ); } + if (IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.get(parserContext.getSettings()) == false) { + throw new IllegalArgumentException( + String.format( + Locale.ROOT, + "Set '%s' to true in the index settings to use the star tree index", + IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey() + ) + ); + } Iterator> iterator = compositeNode.entrySet().iterator(); if (compositeNode.size() > StarTreeIndexSettings.STAR_TREE_MAX_FIELDS_SETTING.get(parserContext.getSettings())) { throw new IllegalArgumentException( diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java index f54b6cdf1b152..ae609b7ffab72 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TranslogFlushIntervalSettingsTests.java
@@ -31,6 +31,7 @@ public void testValidSettings() { Settings requestSettings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "50mb") .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); // This should not throw an exception @@ -38,7 +39,10 @@ public void testValidSettings() { } public void testDefaultTranslogFlushSetting() { - Settings requestSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + Settings requestSettings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); // This should not throw an exception IllegalArgumentException ex = expectThrows( @@ -61,6 +65,7 @@ public void testNullTranslogFlushSetting() { Settings requestSettings = Settings.builder() .putNull(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey()) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); // This should not throw an exception @@ -75,6 +80,7 @@ public void testExceedingMaxFlushSize() { Settings requestSettings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "150mb") .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); IllegalArgumentException ex = expectThrows( @@ -88,6 +94,7 @@ public void testEqualToMaxFlushSize() { Settings requestSettings = Settings.builder() .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "100mb") .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); // This should not throw an exception @@ -99,7 +106,10 @@ public void testUpdateIndexThresholdFlushSize() { .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "100mb") .build(); - Settings indexSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + Settings indexSettings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); // This should not throw an exception assertTrue( @@ -116,7 +126,10 @@ public void testUpdateFlushSizeAboveThresholdWithCompositeIndex() { .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "131mb") .build(); - Settings indexSettings = Settings.builder().put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true).build(); + Settings indexSettings = Settings.builder() + .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) + .build(); Optional err = MetadataCreateIndexService.validateTranslogFlushIntervalSettingsForCompositeIndex( requestSettings, diff --git a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java index 2b9c174a5b6f5..18a7bb03b0a59 100644 --- 
a/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java +++ b/server/src/test/java/org/opensearch/index/codec/composite912/datacube/startree/AbstractStarTreeDVFormatTests.java @@ -100,6 +100,7 @@ public static MapperService createMapperService(XContentBuilder builder) throws .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); IndexMetadata indexMetadata = IndexMetadata.builder("test").settings(settings).putMapping(builder.toString()).build(); diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java index 77534b514a59a..45483ef51a5f9 100644 --- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java @@ -32,6 +32,7 @@ package org.opensearch.index.mapper; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; @@ -542,6 +543,7 @@ public void testCompositeFields() throws Exception { Settings settings = Settings.builder() .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .build(); diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 78edd7c6fe4e7..435621548942b 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -8,6 +8,7 @@ package org.opensearch.index.mapper; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.Rounding; import org.opensearch.common.settings.ClusterSettings; @@ -67,6 +68,7 @@ public void teardown() { protected Settings getIndexSettings() { return Settings.builder() .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)) .put(SETTINGS) .build(); @@ -138,6 +140,7 @@ public void testValidValueForCompositeIndex() { Settings settings = Settings.builder() .put(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "256mb") .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .put(COMPOSITE_INDEX_MAX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), "512mb") .build(); diff --git a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java index b548c844b2476..93b133c0302c9 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java +++ 
b/server/src/test/java/org/opensearch/search/SearchServiceStarTreeTests.java @@ -96,6 +96,7 @@ public void testQueryParsingForMetricAggregations() throws IOException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); CreateIndexRequestBuilder builder = client().admin() .indices() @@ -246,6 +247,7 @@ public void testQueryParsingForDateHistogramAggregations() throws IOException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); CreateIndexRequestBuilder builder = client().admin() .indices() @@ -494,6 +496,7 @@ public void testInvalidQueryParsingForDateHistogramAggregations() throws IOExcep .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(StarTreeIndexSettings.IS_COMPOSITE_INDEX_SETTING.getKey(), true) + .put(IndexMetadata.INDEX_APPEND_ONLY_ENABLED_SETTING.getKey(), true) .build(); CreateIndexRequestBuilder builder = client().admin() .indices() From cd149a9a622686f6b65fb13d23e645998e0b4f14 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Wed, 29 Jan 2025 07:54:29 -0800 Subject: [PATCH 30/48] Always write doc_values param to mapping for wildcard field (#16988) --- .../org/opensearch/index/mapper/WildcardFieldMapper.java | 2 +- .../opensearch/index/mapper/WildcardFieldMapperTests.java | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index e45eaa7766c3c..07dbe695bbbbb 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -102,7 +102,7 @@ public static final class Builder extends ParametrizedFieldMapper.Builder { ); private final Parameter normalizer = Parameter.stringParam("normalizer", false, m -> toType(m).normalizerName, "default"); private final Parameter> meta = Parameter.metaParam(); - private final Parameter hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, false); + private final Parameter hasDocValues = Parameter.docValuesParam(m -> toType(m).hasDocValues, false).alwaysSerialize(); private final IndexAnalyzers indexAnalyzers; public Builder(String name, IndexAnalyzers indexAnalyzers) { diff --git a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java index a93f6b2d47e4f..b19e3687cf944 100644 --- a/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/WildcardFieldMapperTests.java @@ -49,6 +49,7 @@ public class WildcardFieldMapperTests extends MapperTestCase { @Override protected void minimalMapping(XContentBuilder b) throws IOException { b.field("type", "wildcard"); + b.field("doc_values", false); } @Override @@ -59,7 +60,10 @@ protected void writeFieldValue(XContentBuilder builder) throws IOException { @Override protected void registerParameters(ParameterChecker checker) throws IOException { checker.registerConflictCheck("normalizer", b -> 
b.field("normalizer", "lowercase")); - checker.registerConflictCheck("doc_values", b -> b.field("doc_values", true)); + checker.registerConflictCheck("doc_values", fieldMapping(this::minimalMapping), fieldMapping(xcb -> { + xcb.field("type", "wildcard"); + xcb.field("doc_values", true); + })); checker.registerConflictCheck("null_value", b -> b.field("null_value", "foo")); checker.registerUpdateCheck(b -> b.field("ignore_above", 256), m -> assertEquals(256, ((WildcardFieldMapper) m).ignoreAbove())); } From a1a159649517a1f85b5b9068b0ade4837f33159f Mon Sep 17 00:00:00 2001 From: Yupeng Fu Date: Wed, 29 Jan 2025 10:59:03 -0800 Subject: [PATCH 31/48] [Pull-based Ingestion] Introduce the new pull-based ingestion engine, APIs, and Kafka plugin (#16958) * add ingestion modules Signed-off-by: Yupeng Fu * stream poller wip Signed-off-by: Yupeng Fu * update ingestion engine Signed-off-by: Yupeng Fu * kafka container Signed-off-by: Yupeng Fu * more updates Signed-off-by: Yupeng Fu * local update Signed-off-by: Yupeng Fu * add batch_start/end to stream poller Signed-off-by: Yupeng Fu * add index settings Signed-off-by: Yupeng Fu * local change Signed-off-by: Yupeng Fu * pass docmapper Signed-off-by: Yupeng Fu * basic recovery Signed-off-by: Yupeng Fu * add kafka ingestion as plugin Signed-off-by: Yupeng Fu * add integration test for kafka plugin Signed-off-by: Yupeng Fu * cleanup Signed-off-by: Yupeng Fu * use byte[] for message payload type Signed-off-by: Yupeng Fu * javadocs Signed-off-by: Yupeng Fu * add ingestionEngineTest Signed-off-by: Yupeng Fu * test recovery test in ingestionEngineTest Signed-off-by: Yupeng Fu * unit tests for kafka plugin Signed-off-by: Yupeng Fu * style fix Signed-off-by: Yupeng Fu * add license Signed-off-by: Yupeng Fu * more unit tests Signed-off-by: Yupeng Fu * cleanup Signed-off-by: Yupeng Fu * use a blocking queue to pass polled messages to the processor for processing Signed-off-by: Yupeng Fu * address comments also remove security policy from bootstrap files Signed-off-by: Yupeng Fu * support _op_type in message processing Signed-off-by: Yupeng Fu * simplify ingestion source class Signed-off-by: Yupeng Fu * address more comments Signed-off-by: Yupeng Fu * kafka client sha Signed-off-by: Yupeng Fu * fix style Signed-off-by: Yupeng Fu * more style fix Signed-off-by: Yupeng Fu * add changelog Signed-off-by: Yupeng Fu * use latest gradle feature Signed-off-by: Yupeng Fu * try fix thread leak Signed-off-by: Yupeng Fu * comments Signed-off-by: Yupeng Fu * address comments Signed-off-by: Yupeng Fu * ingestionEngine creation failure tes Signed-off-by: Yupeng Fu * suppress removal warning Signed-off-by: Yupeng Fu * support lucene10 Signed-off-by: Yupeng Fu * address comments Signed-off-by: Yupeng Fu --------- Signed-off-by: Yupeng Fu --- CHANGELOG-3.0.md | 1 + gradle/missing-javadoc.gradle | 1 + plugins/ingestion-kafka/build.gradle | 120 ++ .../licenses/kafka-clients-3.8.1.jar.sha1 | 1 + .../licenses/kafka-clients-LICENSE.txt | 202 ++++ .../licenses/kafka-clients-NOTICE.txt | 19 + .../licenses/slf4j-api-1.7.36.jar.sha1 | 1 + .../licenses/slf4j-api-LICENSE.txt | 24 + .../licenses/slf4j-api-NOTICE.txt | 0 .../plugin/kafka/IngestFromKafkaIT.java | 131 +++ .../plugin/kafka/KafkaConsumerFactory.java | 45 + .../opensearch/plugin/kafka/KafkaMessage.java | 43 + .../opensearch/plugin/kafka/KafkaOffset.java | 97 ++ .../plugin/kafka/KafkaPartitionConsumer.java | 188 +++ .../opensearch/plugin/kafka/KafkaPlugin.java | 41 + .../plugin/kafka/KafkaSourceConfig.java | 48 + 
.../plugin-metadata/plugin-security.policy | 21 + .../kafka/KafkaConsumerFactoryTests.java | 39 + .../plugin/kafka/KafkaMessageTests.java | 33 + .../plugin/kafka/KafkaOffsetTests.java | 83 ++ .../kafka/KafkaPartitionConsumerTests.java | 125 ++ .../plugin/kafka/KafkaSourceConfigTests.java | 33 + .../opensearch/plugin/kafka/KafkaUtils.java | 87 ++ server/build.gradle | 2 +- .../cluster/metadata/IndexMetadata.java | 79 ++ .../cluster/metadata/IngestionSource.java | 62 + .../common/settings/IndexScopedSettings.java | 5 + .../org/opensearch/index/IndexService.java | 7 +- .../index/IngestionConsumerFactory.java | 49 + .../index/IngestionShardConsumer.java | 89 ++ .../index/IngestionShardPointer.java | 51 + .../java/org/opensearch/index/Message.java | 19 + .../opensearch/index/engine/EngineConfig.java | 13 + .../index/engine/EngineConfigFactory.java | 5 +- .../index/engine/IngestionEngine.java | 1015 +++++++++++++++++ .../opensearch/index/engine/NoOpEngine.java | 18 +- .../index/engine/ReadOnlyEngine.java | 17 +- .../opensearch/index/shard/IndexShard.java | 12 +- .../opensearch/index/translog/Translog.java | 15 + .../opensearch/indices/IndicesService.java | 28 + .../pollingingest/DefaultStreamPoller.java | 286 +++++ .../pollingingest/IngestionEngineFactory.java | 36 + .../MessageProcessorRunnable.java | 236 ++++ .../indices/pollingingest/StreamPoller.java | 71 ++ .../indices/pollingingest/package-info.java | 10 + .../main/java/org/opensearch/node/Node.java | 8 + .../plugins/IngestionConsumerPlugin.java | 36 + .../bootstrap/test-framework.policy | 1 + .../metadata/IngestionSourceTests.java | 67 ++ .../engine/EngineConfigFactoryTests.java | 2 + .../index/engine/FakeIngestionSource.java | 183 +++ .../index/engine/IngestionEngineTests.java | 189 +++ .../DefaultStreamPollerTests.java | 168 +++ .../pollingingest/MessageProcessorTests.java | 99 ++ .../snapshots/SnapshotResiliencyTests.java | 1 + .../index/engine/EngineTestCase.java | 41 +- .../test/OpenSearchIntegTestCase.java | 7 + 57 files changed, 4273 insertions(+), 37 deletions(-) create mode 100644 plugins/ingestion-kafka/build.gradle create mode 100644 plugins/ingestion-kafka/licenses/kafka-clients-3.8.1.jar.sha1 create mode 100644 plugins/ingestion-kafka/licenses/kafka-clients-LICENSE.txt create mode 100644 plugins/ingestion-kafka/licenses/kafka-clients-NOTICE.txt create mode 100644 plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 create mode 100644 plugins/ingestion-kafka/licenses/slf4j-api-LICENSE.txt create mode 100644 plugins/ingestion-kafka/licenses/slf4j-api-NOTICE.txt create mode 100644 plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaConsumerFactory.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaMessage.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaOffset.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java create mode 100644 plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java create mode 100644 plugins/ingestion-kafka/src/main/plugin-metadata/plugin-security.policy create mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaConsumerFactoryTests.java create 
mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaMessageTests.java create mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaOffsetTests.java create mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java create mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java create mode 100644 plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java create mode 100644 server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java create mode 100644 server/src/main/java/org/opensearch/index/IngestionConsumerFactory.java create mode 100644 server/src/main/java/org/opensearch/index/IngestionShardConsumer.java create mode 100644 server/src/main/java/org/opensearch/index/IngestionShardPointer.java create mode 100644 server/src/main/java/org/opensearch/index/Message.java create mode 100644 server/src/main/java/org/opensearch/index/engine/IngestionEngine.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java create mode 100644 server/src/main/java/org/opensearch/indices/pollingingest/package-info.java create mode 100644 server/src/main/java/org/opensearch/plugins/IngestionConsumerPlugin.java create mode 100644 server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java create mode 100644 server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java create mode 100644 server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java create mode 100644 server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java create mode 100644 server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 8d8adfd1e3566..bd7fd5fb621ce 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) +- Added pull-based ingestion (APIs for the ingestion source, a Kafka plugin, and an IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) ### Dependencies - Update Apache Lucene to 10.1.0 ([#16366](https://github.com/opensearch-project/OpenSearch/pull/16366)) diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 342417b3b40ab..d65a318b096a9 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -140,6 +140,7 @@ configure([ project(":plugins:discovery-gce:qa:gce"), project(":plugins:identity-shiro"), project(":plugins:ingest-attachment"), + project(":plugins:ingestion-kafka"), project(":plugins:mapper-annotated-text"), project(":plugins:mapper-murmur3"), project(":plugins:mapper-size"), diff --git a/plugins/ingestion-kafka/build.gradle b/plugins/ingestion-kafka/build.gradle new file mode 100644 index 0000000000000..0d5344de213e3 --- /dev/null +++ b/plugins/ingestion-kafka/build.gradle @@ -0,0 +1,120 @@ +/* +* SPDX-License-Identifier: Apache-2.0 +* +* The OpenSearch Contributors require contributions made to +* this file be licensed under the Apache-2.0 license or a +* compatible open source license. +* +* Modifications Copyright OpenSearch Contributors. See +* GitHub history for details. +*/ + +apply plugin: 'opensearch.internal-cluster-test' + +opensearchplugin { + description = 'Pull-based ingestion plugin to consume from Kafka' + classname = 'org.opensearch.plugin.kafka.KafkaPlugin' +} + +versions << [ + 'kafka': '3.8.1', + 'docker': '3.3.6', + 'testcontainers': '1.19.7', + 'ducttape': '1.0.8', +] + +dependencies { + // kafka + api "org.slf4j:slf4j-api:${versions.slf4j}" + api "org.apache.kafka:kafka-clients:${versions.kafka}" + + // test + testImplementation "com.github.docker-java:docker-java-api:${versions.docker}" + testImplementation "com.github.docker-java:docker-java-transport:${versions.docker}" + testImplementation "com.github.docker-java:docker-java-transport-zerodep:${versions.docker}" + testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testImplementation "org.testcontainers:testcontainers:${versions.testcontainers}" + testImplementation "org.testcontainers:kafka:${versions.testcontainers}" + testImplementation "org.rnorth.duct-tape:duct-tape:${versions.ducttape}" + testImplementation "org.apache.commons:commons-compress:${versions.commonscompress}" + testImplementation "commons-io:commons-io:${versions.commonsio}" + testImplementation 'org.awaitility:awaitility:4.2.0' +} + +internalClusterTest { + environment 'TESTCONTAINERS_RYUK_DISABLED', 'true' + // TODO: Adding permission in plugin-security.policy doesn't seem to work.
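+ // Testcontainers needs Docker-socket and container-reaper access that the Java security manager
+ // appears to deny, hence the RYUK env var above and the security.manager opt-out below.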
+ systemProperty 'tests.security.manager', 'false' +} + +thirdPartyAudit { + ignoreMissingClasses( + 'com.fasterxml.jackson.databind.JsonNode', + 'com.fasterxml.jackson.databind.ObjectMapper', + 'com.fasterxml.jackson.databind.node.ArrayNode', + 'com.fasterxml.jackson.databind.node.BooleanNode', + 'com.fasterxml.jackson.databind.node.JsonNodeFactory', + 'com.fasterxml.jackson.databind.node.JsonNodeType', + 'com.fasterxml.jackson.databind.node.NullNode', + 'com.fasterxml.jackson.databind.node.ObjectNode', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4SafeDecompressor', + 'net.jpountz.util.SafeUtils', + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', + 'com.google.common.util.concurrent.ListenableFuture', + 'io.grpc.BindableService', + 'io.grpc.CallOptions', + 'io.grpc.Channel', + 'io.grpc.MethodDescriptor', + 'io.grpc.MethodDescriptor$Builder', + 'io.grpc.MethodDescriptor$MethodType', + 'io.grpc.ServerServiceDefinition', + 'io.grpc.ServerServiceDefinition$Builder', + 'io.grpc.ServiceDescriptor', + 'io.grpc.ServiceDescriptor$Builder', + 'io.grpc.protobuf.ProtoFileDescriptorSupplier', + 'io.grpc.protobuf.ProtoMethodDescriptorSupplier', + 'io.grpc.protobuf.ProtoServiceDescriptorSupplier', + 'io.grpc.protobuf.ProtoUtils', + 'io.grpc.stub.AbstractAsyncStub', + 'io.grpc.stub.AbstractBlockingStub', + 'io.grpc.stub.AbstractFutureStub', + 'io.grpc.stub.AbstractStub$StubFactory', + 'io.grpc.stub.ClientCalls', + 'io.grpc.stub.ServerCalls', + 'io.grpc.stub.ServerCalls$BidiStreamingMethod', + 'io.grpc.stub.ServerCalls$ClientStreamingMethod', + 'io.grpc.stub.ServerCalls$ServerStreamingMethod', + 'io.grpc.stub.ServerCalls$UnaryMethod', + 'io.grpc.stub.StreamObserver', + 'org.jose4j.http.Get', + 'org.jose4j.jwa.AlgorithmConstraints', + 'org.jose4j.jwk.HttpsJwks', + 'org.jose4j.jwk.JsonWebKey', + 'org.jose4j.jwk.JsonWebKeySet', + 'org.jose4j.jwk.VerificationJwkSelector', + 'org.jose4j.jws.JsonWebSignature', + 'org.jose4j.jwt.JwtClaims', + 'org.jose4j.jwt.NumericDate', + 'org.jose4j.jwt.consumer.InvalidJwtException', + 'org.jose4j.jwt.consumer.JwtConsumer', + 'org.jose4j.jwt.consumer.JwtConsumerBuilder', + 'org.jose4j.jwt.consumer.JwtContext', + 'org.jose4j.jwx.Headers', + 'org.jose4j.keys.resolvers.VerificationKeyResolver', + ) + ignoreViolations( + 'org.apache.kafka.shaded.com.google.protobuf.MessageSchema', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil$1', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil$Android32MemoryAccessor', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil$Android64MemoryAccessor', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil$JvmMemoryAccessor', + 'org.apache.kafka.shaded.com.google.protobuf.UnsafeUtil$MemoryAccessor', + ) +} diff --git a/plugins/ingestion-kafka/licenses/kafka-clients-3.8.1.jar.sha1 b/plugins/ingestion-kafka/licenses/kafka-clients-3.8.1.jar.sha1 new file mode 100644 index 0000000000000..3864a4eb6a0dd --- /dev/null +++ b/plugins/ingestion-kafka/licenses/kafka-clients-3.8.1.jar.sha1 @@ -0,0 +1 @@ +fd79e3aa252c6d818334e9c0bac8166b426e498c \ No newline at end of file diff --git a/plugins/ingestion-kafka/licenses/kafka-clients-LICENSE.txt b/plugins/ingestion-kafka/licenses/kafka-clients-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ 
b/plugins/ingestion-kafka/licenses/kafka-clients-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/ingestion-kafka/licenses/kafka-clients-NOTICE.txt b/plugins/ingestion-kafka/licenses/kafka-clients-NOTICE.txt new file mode 100644 index 0000000000000..677b5251d9bfa --- /dev/null +++ b/plugins/ingestion-kafka/licenses/kafka-clients-NOTICE.txt @@ -0,0 +1,19 @@ +Apache Kafka +Copyright 2022 The Apache Software Foundation. + +This product includes software developed at +The Apache Software Foundation (https://www.apache.org/). + +This distribution has a binary dependency on jersey, which is available under the CDDL +License. The source code of jersey can be found at https://github.com/jersey/jersey/. + +The streams-scala (streams/streams-scala) module was donated by Lightbend and the original code was copyrighted by them: +Copyright (C) 2018 Lightbend Inc. +Copyright (C) 2017-2018 Alexis Seigneurin. + +This project contains the following code copied from Apache Hadoop: +clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. + +This project contains the following code copied from Apache Hive: +streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java diff --git a/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 b/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/plugins/ingestion-kafka/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/plugins/ingestion-kafka/licenses/slf4j-api-LICENSE.txt b/plugins/ingestion-kafka/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..1a3d053237bec --- /dev/null +++ b/plugins/ingestion-kafka/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,24 @@ +Copyright (c) 2004-2022 QOS.ch Sarl (Switzerland) +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + + diff --git a/plugins/ingestion-kafka/licenses/slf4j-api-NOTICE.txt b/plugins/ingestion-kafka/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java new file mode 100644 index 0000000000000..c726489d12626 --- /dev/null +++ b/plugins/ingestion-kafka/src/internalClusterTest/java/org/opensearch/plugin/kafka/IngestFromKafkaIT.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugin.kafka; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; + +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.serialization.StringSerializer; +import org.opensearch.action.admin.cluster.node.info.NodeInfo; +import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.node.info.PluginsAndModules; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.RangeQueryBuilder; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.Assert; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import org.testcontainers.containers.KafkaContainer; +import org.testcontainers.utility.DockerImageName; + +import static org.hamcrest.Matchers.is; +import static org.awaitility.Awaitility.await; + +/** + * Integration test for Kafka ingestion + */ +@ThreadLeakLingering(linger = 15000) // wait for container pull thread to die +public class IngestFromKafkaIT extends OpenSearchIntegTestCase { + static final String topicName = "test"; + + private KafkaContainer kafka; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(KafkaPlugin.class); + } + + /** + * test ingestion-kafka-plugin is installed + */ + public void testPluginsAreInstalled() { + NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(); + nodesInfoRequest.addMetric(NodesInfoRequest.Metric.PLUGINS.metricName()); + NodesInfoResponse nodesInfoResponse = OpenSearchIntegTestCase.client().admin().cluster().nodesInfo(nodesInfoRequest).actionGet(); + List pluginInfos = nodesInfoResponse.getNodes() + .stream() + .flatMap( + (Function>) nodeInfo -> nodeInfo.getInfo(PluginsAndModules.class).getPluginInfos().stream() + ) + .collect(Collectors.toList()); + Assert.assertTrue( + pluginInfos.stream().anyMatch(pluginInfo -> pluginInfo.getName().equals("org.opensearch.plugin.kafka.KafkaPlugin")) + ); + } + + public void testKafkaIngestion() { + setupKafka(); + // create an index with ingestion source from kafka + createIndex( + "test", + Settings.builder() + 
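+ // The ingestion_source.* settings below wire the index to Kafka: 'type' picks the consumer
+ // plugin, 'pointer.init.reset' sets the initial offset behavior, and the 'param.*' entries
+ // are passed through to KafkaSourceConfig.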
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                .put("ingestion_source.type", "kafka")
+                .put("ingestion_source.pointer.init.reset", "earliest")
+                .put("ingestion_source.param.topic", "test")
+                .put("ingestion_source.param.bootstrap_servers", kafka.getBootstrapServers())
+                .build(),
+            "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}"
+        );
+
+        RangeQueryBuilder query = new RangeQueryBuilder("age").gte(21);
+        await().atMost(10, TimeUnit.SECONDS).untilAsserted(() -> {
+            refresh("test");
+            SearchResponse response = client().prepareSearch("test").setQuery(query).get();
+            assertThat(response.getHits().getTotalHits().value(), is(1L));
+        });
+
+        stopKafka();
+    }
+
+    private void setupKafka() {
+        kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka:6.2.1"))
+            // disable topic auto creation
+            .withEnv("KAFKA_AUTO_CREATE_TOPICS_ENABLE", "false");
+        kafka.start();
+        prepareKafkaData();
+    }
+
+    private void stopKafka() {
+        kafka.stop();
+    }
+
+    private void prepareKafkaData() {
+        String bootstrapServers = kafka.getBootstrapServers();
+        KafkaUtils.createTopic(topicName, 1, bootstrapServers);
+        Properties props = new Properties();
+        props.put("bootstrap.servers", kafka.getBootstrapServers());
+        Producer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer());
+        producer.send(new ProducerRecord<>(topicName, "null", "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}"));
+        producer.send(
+            new ProducerRecord<>(
+                topicName,
+                "null",
+                "{\"_id\":\"2\", \"_op_type\":\"index\",\"_source\":{\"name\":\"alice\", \"age\": 20}}"
+            )
+        );
+        producer.close();
+    }
+}
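The two `producer.send` calls above illustrate the message envelope the ingestion engine expects on the topic: an `_id`, an optional `_op_type`, and the document body under `_source`. A small helper that builds the same envelope (the `KafkaDocSender` name and the string concatenation are illustrative only, not part of the plugin) might look like:

    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    final class KafkaDocSender {
        // Wraps a JSON document in the {"_id": ..., "_source": ...} envelope used above.
        static void sendDoc(Producer<String, String> producer, String topic, String id, String sourceJson) {
            String envelope = "{\"_id\":\"" + id + "\",\"_source\":" + sourceJson + "}";
            producer.send(new ProducerRecord<>(topic, "null", envelope));
        }
    }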
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaConsumerFactory.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaConsumerFactory.java
new file mode 100644
index 0000000000000..83c2480a3c985
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaConsumerFactory.java
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.index.IngestionConsumerFactory;
+
+import java.util.Map;
+
+/**
+ * Factory for creating Kafka consumers
+ */
+public class KafkaConsumerFactory implements IngestionConsumerFactory<KafkaPartitionConsumer, KafkaOffset> {
+
+    /**
+     * Configuration for the Kafka source
+     */
+    protected KafkaSourceConfig config;
+
+    /**
+     * Constructor.
+     */
+    public KafkaConsumerFactory() {}
+
+    @Override
+    public void initialize(Map<String, Object> params) {
+        config = new KafkaSourceConfig(params);
+    }
+
+    @Override
+    public KafkaPartitionConsumer createShardConsumer(String clientId, int shardId) {
+        assert config != null;
+        return new KafkaPartitionConsumer(clientId, config, shardId);
+    }
+
+    @Override
+    public KafkaOffset parsePointerFromString(String pointer) {
+        return new KafkaOffset(Long.valueOf(pointer));
+    }
+}
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaMessage.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaMessage.java
new file mode 100644
index 0000000000000..7ff13f48ef846
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaMessage.java
@@ -0,0 +1,43 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.common.Nullable;
+import org.opensearch.index.Message;
+
+/**
+ * Kafka message
+ */
+public class KafkaMessage implements Message<byte[]> {
+    private final byte[] key;
+    private final byte[] payload;
+
+    /**
+     * Constructor
+     * @param key the key of the message
+     * @param payload the payload of the message
+     */
+    public KafkaMessage(@Nullable byte[] key, byte[] payload) {
+        this.key = key;
+        this.payload = payload;
+    }
+
+    /**
+     * Get the key of the message
+     * @return the key of the message
+     */
+    public byte[] getKey() {
+        return key;
+    }
+
+    @Override
+    public byte[] getPayload() {
+        return payload;
+    }
+}
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaOffset.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaOffset.java
new file mode 100644
index 0000000000000..0a1c11d5daa8b
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaOffset.java
@@ -0,0 +1,97 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.search.Query;
+import org.opensearch.index.IngestionShardPointer;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Kafka offset.
+ */
+public class KafkaOffset implements IngestionShardPointer {
+
+    private final long offset;
+
+    /**
+     * Constructor
+     * @param offset the offset
+     */
+    public KafkaOffset(long offset) {
+        assert offset >= 0;
+        this.offset = offset;
+    }
+
+    @Override
+    public byte[] serialize() {
+        ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES);
+        buffer.putLong(offset);
+        return buffer.array();
+    }
+
+    @Override
+    public String asString() {
+        return String.valueOf(offset);
+    }
+
+    @Override
+    public Field asPointField(String fieldName) {
+        return new LongPoint(fieldName, offset);
+    }
+
+    @Override
+    public Query newRangeQueryGreaterThan(String fieldName) {
+        return LongPoint.newRangeQuery(fieldName, offset, Long.MAX_VALUE);
+    }
+
+    /**
+     * Get the offset
+     * @return the offset
+     */
+    public long getOffset() {
+        return offset;
+    }
+
+    @Override
+    public String toString() {
+        return "KafkaOffset{" + "offset=" + offset + '}';
+    }
+
+    @Override
+    public int compareTo(IngestionShardPointer o) {
+        if (o == null) {
+            throw new IllegalArgumentException("the pointer is null");
+        }
+        if (!(o instanceof KafkaOffset)) {
+            throw new IllegalArgumentException("the pointer is of type " + o.getClass() + " and not KafkaOffset");
+        }
+        KafkaOffset other = (KafkaOffset) o;
+        return Long.compare(offset, other.offset);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        KafkaOffset that = (KafkaOffset) o;
+        return offset == that.offset;
+    }
+
+    @Override
+    public int hashCode() {
+        return Long.hashCode(offset);
+    }
+}
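Since checkpoints are persisted as strings in commit data, `KafkaOffset.asString()` and `KafkaConsumerFactory.parsePointerFromString(...)` have to round-trip. A minimal sketch of that invariant, using only APIs introduced in this patch:

    KafkaConsumerFactory factory = new KafkaConsumerFactory();
    KafkaOffset original = new KafkaOffset(42L);
    // Serialize the pointer to a string (as done for commit data), then recover it.
    KafkaOffset recovered = factory.parsePointerFromString(original.asString());
    assert original.compareTo(recovered) == 0;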
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java
new file mode 100644
index 0000000000000..a20e52a06eecd
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPartitionConsumer.java
@@ -0,0 +1,188 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.ByteArrayDeserializer;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.index.IngestionShardConsumer;
+import org.opensearch.index.IngestionShardPointer;
+
+import java.io.IOException;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * Kafka consumer to read messages from a Kafka partition
+ */
+@SuppressWarnings("removal")
+public class KafkaPartitionConsumer implements IngestionShardConsumer<KafkaOffset, KafkaMessage> {
+    private static final Logger logger = LogManager.getLogger(KafkaPartitionConsumer.class);
+
+    /**
+     * The Kafka consumer
+     */
+    protected final Consumer<byte[], byte[]> consumer;
+    // TODO: make this configurable
+    private final int timeoutMillis = 1000;
+
+    private long lastFetchedOffset = -1;
+    final String clientId;
+    final TopicPartition topicPartition;
+
+    /**
+     * Constructor
+     * @param clientId the client id
+     * @param config the Kafka source config
+     * @param partitionId the partition id
+     */
+    public KafkaPartitionConsumer(String clientId, KafkaSourceConfig config, int partitionId) {
+        this(clientId, config, partitionId, createConsumer(clientId, config));
+    }
+
+    /**
+     * Constructor, visible for testing
+     * @param clientId the client id
+     * @param config the Kafka source config
+     * @param partitionId the partition id
+     * @param consumer the created Kafka consumer
+     */
+    protected KafkaPartitionConsumer(String clientId, KafkaSourceConfig config, int partitionId, Consumer<byte[], byte[]> consumer) {
+        this.clientId = clientId;
+        this.consumer = consumer;
+        String topic = config.getTopic();
+        List<PartitionInfo> partitionInfos = AccessController.doPrivileged(
+            (PrivilegedAction<List<PartitionInfo>>) () -> consumer.partitionsFor(topic, Duration.ofMillis(timeoutMillis))
+        );
+        if (partitionInfos == null) {
+            throw new IllegalArgumentException("Topic " + topic + " does not exist");
+        }
+        if (partitionId >= partitionInfos.size()) {
+            throw new IllegalArgumentException("Partition " + partitionId + " does not exist in topic " + topic);
+        }
+        topicPartition = new TopicPartition(topic, partitionId);
+        consumer.assign(Collections.singletonList(topicPartition));
+        logger.info("Kafka consumer created for topic {} partition {}", topic, partitionId);
+    }
+
+    /**
+     * Create a Kafka consumer; visible for testing
+     * @param clientId the client id
+     * @param config the Kafka source config
+     * @return the Kafka consumer
+     */
+    protected static Consumer<byte[], byte[]> createConsumer(String clientId, KafkaSourceConfig config) {
+        Properties consumerProp = new Properties();
+        consumerProp.put("bootstrap.servers", config.getBootstrapServers());
+        consumerProp.put("client.id", clientId);
+        // TODO: why Class org.apache.kafka.common.serialization.StringDeserializer could not be found if set the deserializer as prop?
+ // consumerProp.put("key.deserializer", + // "org.apache.kafka.common.serialization.StringDeserializer"); + // consumerProp.put("value.deserializer", + // "org.apache.kafka.common.serialization.StringDeserializer"); + // + // wrap the kafka consumer creation in a privileged block to apply plugin security policies + return AccessController.doPrivileged( + (PrivilegedAction>) () -> new KafkaConsumer<>( + consumerProp, + new ByteArrayDeserializer(), + new ByteArrayDeserializer() + ) + ); + } + + @Override + public List> readNext(KafkaOffset offset, long maxMessages, int timeoutMillis) + throws TimeoutException { + List> records = AccessController.doPrivileged( + (PrivilegedAction>>) () -> fetch(offset.getOffset(), maxMessages, timeoutMillis) + ); + return records; + } + + @Override + public KafkaOffset nextPointer() { + return new KafkaOffset(lastFetchedOffset + 1); + } + + @Override + public IngestionShardPointer earliestPointer() { + long startOffset = AccessController.doPrivileged( + (PrivilegedAction) () -> consumer.beginningOffsets(Collections.singletonList(topicPartition)) + .getOrDefault(topicPartition, 0L) + ); + return new KafkaOffset(startOffset); + } + + @Override + public IngestionShardPointer latestPointer() { + long endOffset = AccessController.doPrivileged( + (PrivilegedAction) () -> consumer.endOffsets(Collections.singletonList(topicPartition)).getOrDefault(topicPartition, 0L) + ); + return new KafkaOffset(endOffset); + } + + private synchronized List> fetch(long startOffset, long maxMessages, int timeoutMillis) { + if (lastFetchedOffset < 0 || lastFetchedOffset != startOffset - 1) { + logger.info("Seeking to offset {}", startOffset); + consumer.seek(topicPartition, startOffset); + // update the last fetched offset so that we don't need to seek again if no more messages to fetch + lastFetchedOffset = startOffset - 1; + } + + ConsumerRecords consumerRecords = consumer.poll(Duration.ofMillis(timeoutMillis)); + List> messageAndOffsets = consumerRecords.records(topicPartition); + + long endOffset = startOffset + maxMessages; + List> results = new ArrayList<>(); + + for (ConsumerRecord messageAndOffset : messageAndOffsets) { + long currentOffset = messageAndOffset.offset(); + if (currentOffset >= endOffset) { + // fetched more message than max + break; + } + lastFetchedOffset = currentOffset; + KafkaOffset kafkaOffset = new KafkaOffset(currentOffset); + KafkaMessage message = new KafkaMessage(messageAndOffset.key(), messageAndOffset.value()); + results.add(new ReadResult<>(kafkaOffset, message)); + } + return results; + } + + @Override + public int getShardId() { + return topicPartition.partition(); + } + + @Override + public void close() throws IOException { + consumer.close(); + } + + /** + * Get the client id + * @return the client id + */ + public String getClientId() { + return clientId; + } +} diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java new file mode 100644 index 0000000000000..8de2834c40583 --- /dev/null +++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java
new file mode 100644
index 0000000000000..8de2834c40583
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaPlugin.java
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.index.IngestionConsumerFactory;
+import org.opensearch.plugins.IngestionConsumerPlugin;
+import org.opensearch.plugins.Plugin;
+
+import java.util.Map;
+
+/**
+ * A plugin for ingestion source of Kafka.
+ */
+public class KafkaPlugin extends Plugin implements IngestionConsumerPlugin {
+    /**
+     * The type of the ingestion source.
+     */
+    public static final String TYPE = "KAFKA";
+
+    /**
+     * Constructor.
+     */
+    public KafkaPlugin() {}
+
+    @SuppressWarnings("rawtypes")
+    @Override
+    public Map<String, IngestionConsumerFactory> getIngestionConsumerFactories() {
+        return Map.of(TYPE, new KafkaConsumerFactory());
+    }
+
+    @Override
+    public String getType() {
+        return TYPE;
+    }
+}
diff --git a/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java
new file mode 100644
index 0000000000000..099300c6e5767
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/java/org/opensearch/plugin/kafka/KafkaSourceConfig.java
@@ -0,0 +1,48 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Class encapsulating the configuration of a Kafka source.
+ */
+public class KafkaSourceConfig {
+    private final String topic;
+    private final String bootstrapServers;
+
+    /**
+     * Constructor
+     * @param params the configuration parameters
+     */
+    public KafkaSourceConfig(Map<String, Object> params) {
+        // TODO: better parsing and validation
+        this.topic = (String) Objects.requireNonNull(params.get("topic"));
+        this.bootstrapServers = (String) Objects.requireNonNull(params.get("bootstrap_servers"));
+        assert this.bootstrapServers != null;
+    }
+
+    /**
+     * Get the topic name
+     * @return the topic name
+     */
+    public String getTopic() {
+        return topic;
+    }
+
+    /**
+     * Get the bootstrap servers
+     *
+     * @return the bootstrap servers
+     */
+    public String getBootstrapServers() {
+        return bootstrapServers;
+    }
+}
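Each `index.ingestion_source.param.*` index setting surfaces here with the prefix stripped, so for the Kafka source the map passed to the constructor carries the two required keys:

    Map<String, Object> params = Map.of(
        "topic", "test",                       // from index.ingestion_source.param.topic
        "bootstrap_servers", "localhost:9092"  // from index.ingestion_source.param.bootstrap_servers
    );
    KafkaSourceConfig config = new KafkaSourceConfig(params);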
diff --git a/plugins/ingestion-kafka/src/main/plugin-metadata/plugin-security.policy b/plugins/ingestion-kafka/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..2fec30325339d
--- /dev/null
+++ b/plugins/ingestion-kafka/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+  // needed because Kafka Consumer needs the access to MBeans
+  permission javax.management.MBeanServerPermission "createMBeanServer";
+  permission javax.management.MBeanTrustPermission "register";
+  permission javax.management.MBeanPermission "*", "*";
+  permission java.lang.management.ManagementPermission "monitor";
+  permission java.lang.management.ManagementPermission "control";
+
+  // Allow host/ip name service lookups
+  permission java.net.SocketPermission "*", "connect";
+  permission java.net.SocketPermission "*", "resolve";
+};
+
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaConsumerFactoryTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaConsumerFactoryTests.java
new file mode 100644
index 0000000000000..deaa4b1f0b369
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaConsumerFactoryTests.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Assert;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class KafkaConsumerFactoryTests extends OpenSearchTestCase {
+    public void testInitialize() {
+        KafkaConsumerFactory factory = new KafkaConsumerFactory();
+        Map<String, Object> params = new HashMap<>();
+        params.put("topic", "test-topic");
+        params.put("bootstrap_servers", "localhost:9092");
+
+        factory.initialize(params);
+
+        KafkaSourceConfig config = factory.config;
+        Assert.assertNotNull("Config should be initialized", config);
+        Assert.assertEquals("Topic should be correctly initialized", "test-topic", config.getTopic());
+        Assert.assertEquals("Bootstrap servers should be correctly initialized", "localhost:9092", config.getBootstrapServers());
+    }
+
+    public void testParsePointerFromString() {
+        KafkaConsumerFactory factory = new KafkaConsumerFactory();
+        KafkaOffset offset = factory.parsePointerFromString("12345");
+
+        Assert.assertNotNull("Offset should be parsed", offset);
+        Assert.assertEquals("Offset value should be correctly parsed", 12345L, offset.getOffset());
+    }
+}
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaMessageTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaMessageTests.java
new file mode 100644
index 0000000000000..a7601f7a1284e
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaMessageTests.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Assert;
+
+public class KafkaMessageTests extends OpenSearchTestCase {
+    public void testConstructorAndGetters() {
+        byte[] key = { 1, 2, 3 };
+        byte[] payload = { 4, 5, 6 };
+
+        KafkaMessage message = new KafkaMessage(key, payload);
+
+        Assert.assertArrayEquals(key, message.getKey());
+        Assert.assertArrayEquals(payload, message.getPayload());
+    }
+
+    public void testConstructorWithNullKey() {
+        byte[] payload = { 4, 5, 6 };
+
+        KafkaMessage message = new KafkaMessage(null, payload);
+
+        assertNull(message.getKey());
+        Assert.assertArrayEquals(payload, message.getPayload());
+    }
+}
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaOffsetTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaOffsetTests.java
new file mode 100644
index 0000000000000..c4170c5682073
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaOffsetTests.java
@@ -0,0 +1,83 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.search.PointRangeQuery;
+import org.apache.lucene.search.Query;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Assert;
+
+import java.nio.ByteBuffer;
+
+public class KafkaOffsetTests extends OpenSearchTestCase {
+
+    public void testConstructorAndGetters() {
+        long offset = 12345L;
+        KafkaOffset kafkaOffset = new KafkaOffset(offset);
+
+        Assert.assertEquals("The offset should be correctly initialized and returned", offset, kafkaOffset.getOffset());
+    }
+
+    public void testEqualsAndHashCode() {
+        long offset1 = 12345L;
+        long offset2 = 67890L;
+        KafkaOffset kafkaOffset1 = new KafkaOffset(offset1);
+        KafkaOffset kafkaOffset2 = new KafkaOffset(offset1);
+        KafkaOffset kafkaOffset3 = new KafkaOffset(offset2);
+
+        Assert.assertTrue("Offsets with the same value should be equal", kafkaOffset1.equals(kafkaOffset2));
+        Assert.assertFalse("Offsets with different values should not be equal", kafkaOffset1.equals(kafkaOffset3));
+        Assert.assertEquals("Hash codes for equal offsets should be the same", kafkaOffset1.hashCode(), kafkaOffset2.hashCode());
+        Assert.assertNotEquals("Hash codes for different offsets should not be the same", kafkaOffset1.hashCode(), kafkaOffset3.hashCode());
+    }
+
+    public void testSerialize() {
+        long offset = 12345L;
+        KafkaOffset kafkaOffset = new KafkaOffset(offset);
+        byte[] expectedBytes = ByteBuffer.allocate(Long.BYTES).putLong(offset).array();
+
+        Assert.assertArrayEquals("The serialized bytes should be correct", expectedBytes, kafkaOffset.serialize());
+    }
+
+    public void testAsString() {
+        long offset = 12345L;
+        KafkaOffset kafkaOffset = new KafkaOffset(offset);
+
+        Assert.assertEquals("The string representation should be correct", String.valueOf(offset), kafkaOffset.asString());
+    }
+
+    public void testAsPointField() {
+        long offset = 12345L;
+        KafkaOffset kafkaOffset = new KafkaOffset(offset);
+        Field field = kafkaOffset.asPointField("offsetField");
+
+        Assert.assertTrue("The field should be an instance of LongPoint", field instanceof LongPoint);
+    }
+
+    public void testNewRangeQueryGreaterThan() {
+        long offset = 12345L;
+        KafkaOffset kafkaOffset = new KafkaOffset(offset);
+        Query query = kafkaOffset.newRangeQueryGreaterThan("offsetField");
+
+        Assert.assertTrue("The query should be an instance of range query", query instanceof PointRangeQuery);
+    }
+
+    public void testCompareTo() {
+        long offset1 = 12345L;
+        long offset2 = 67890L;
+        KafkaOffset kafkaOffset1 = new KafkaOffset(offset1);
+        KafkaOffset kafkaOffset2 = new KafkaOffset(offset2);
+
+        Assert.assertTrue("The comparison should be correct", kafkaOffset1.compareTo(kafkaOffset2) < 0);
+        Assert.assertTrue("The comparison should be correct", kafkaOffset2.compareTo(kafkaOffset1) > 0);
+        Assert.assertTrue("The comparison should be correct", kafkaOffset1.compareTo(kafkaOffset1) == 0);
+    }
+}
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java
new file mode 100644
index 0000000000000..96f639366d887
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaPartitionConsumerTests.java
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+import org.opensearch.index.IngestionShardConsumer;
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Before;
+
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class KafkaPartitionConsumerTests extends OpenSearchTestCase {
+
+    private KafkaSourceConfig config;
+    private KafkaConsumer<byte[], byte[]> mockConsumer;
+    private KafkaPartitionConsumer consumer;
+
+    @SuppressWarnings("unchecked")
+    @Before
+    public void setUp() throws Exception {
+        super.setUp();
+        Map<String, Object> params = new HashMap<>();
+        params.put("topic", "test-topic");
+        params.put("bootstrap_servers", "localhost:9092");
+
+        config = new KafkaSourceConfig(params);
+        mockConsumer = mock(KafkaConsumer.class);
+        // Mock the partitionsFor method
+        PartitionInfo partitionInfo = new PartitionInfo("test-topic", 0, null, null, null);
+        when(mockConsumer.partitionsFor(eq("test-topic"), any(Duration.class))).thenReturn(Collections.singletonList(partitionInfo));
+        consumer = new KafkaPartitionConsumer("client1", config, 0, mockConsumer);
+    }
+
+    public void testReadNext() throws Exception {
+        TopicPartition topicPartition = new TopicPartition("test-topic", 0);
+        ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("test-topic", 0, 0, null, "message".getBytes(StandardCharsets.UTF_8));
+        ConsumerRecords<byte[], byte[]> records = new ConsumerRecords<>(
+            Collections.singletonMap(topicPartition, Collections.singletonList(record))
+        );
+
+        when(mockConsumer.poll(any(Duration.class))).thenReturn(records);
+
+        List<IngestionShardConsumer.ReadResult<KafkaOffset, KafkaMessage>> result = consumer.readNext(new KafkaOffset(0), 10, 1000);
+
+        assertEquals(1, result.size());
+        assertEquals("message", new String(result.get(0).getMessage().getPayload(), StandardCharsets.UTF_8));
+        assertEquals(1, consumer.nextPointer().getOffset());
+        assertEquals(0, consumer.getShardId());
+        assertEquals("client1", consumer.getClientId());
+    }
+
+    public void testEarliestPointer() {
+        TopicPartition topicPartition = new TopicPartition("test-topic", 0);
+        when(mockConsumer.beginningOffsets(Collections.singletonList(topicPartition))).thenReturn(
+            Collections.singletonMap(topicPartition, 0L)
+        );
+
+        KafkaOffset offset = (KafkaOffset) consumer.earliestPointer();
+
+        assertEquals(0L, offset.getOffset());
+    }
+
+    public void testLatestPointer() {
+        TopicPartition topicPartition = new TopicPartition("test-topic", 0);
+        when(mockConsumer.endOffsets(Collections.singletonList(topicPartition))).thenReturn(Collections.singletonMap(topicPartition, 10L));
+
+        KafkaOffset offset = (KafkaOffset) consumer.latestPointer();
+
+        assertEquals(10L, offset.getOffset());
+    }
+
+    public void testTopicDoesNotExist() {
+        Map<String, Object> params = new HashMap<>();
+        params.put("topic", "non-existent-topic");
+        params.put("bootstrap_servers", "localhost:9092");
+        var kafkaSourceConfig = new KafkaSourceConfig(params);
+        when(mockConsumer.partitionsFor(eq("non-existent-topic"), any(Duration.class))).thenReturn(null);
+        try {
+            new KafkaPartitionConsumer("client1", kafkaSourceConfig, 0, mockConsumer);
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("Topic non-existent-topic does not exist", e.getMessage());
+        }
+    }
+
+    public void testPartitionDoesNotExist() {
+        PartitionInfo partitionInfo = new PartitionInfo("test-topic", 0, null, null, null);
+        when(mockConsumer.partitionsFor(eq("test-topic"), any(Duration.class))).thenReturn(Collections.singletonList(partitionInfo));
+        try {
+            new KafkaPartitionConsumer("client1", config, 1, mockConsumer);
+            fail("Expected IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("Partition 1 does not exist in topic test-topic", e.getMessage());
+        }
+    }
+
+    public void testCreateConsumer() {
+        String clientId = "test-client";
+        Consumer<byte[], byte[]> consumer = KafkaPartitionConsumer.createConsumer(clientId, config);
+
+        assertNotNull(consumer);
+        assertEquals(KafkaConsumer.class, consumer.getClass());
+    }
+}
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java
new file mode 100644
index 0000000000000..aa4ddb94f23fc
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaSourceConfigTests.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.opensearch.test.OpenSearchTestCase;
+import org.junit.Assert;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class KafkaSourceConfigTests extends OpenSearchTestCase {
+
+    public void testConstructorAndGetters() {
+        Map<String, Object> params = new HashMap<>();
+        params.put("topic", "topic");
+        params.put("bootstrap_servers", "bootstrap");
+
+        KafkaSourceConfig config = new KafkaSourceConfig(params);
+
+        Assert.assertEquals("The topic should be correctly initialized and returned", "topic", config.getTopic());
+        Assert.assertEquals(
+            "The bootstrap servers should be correctly initialized and returned",
+            "bootstrap",
+            config.getBootstrapServers()
+        );
+    }
+}
diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java
new file mode 100644
index 0000000000000..39af56ea04ed7
--- /dev/null
+++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.plugin.kafka;
+
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.KafkaAdminClient;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.common.KafkaFuture;
+import org.apache.kafka.common.errors.TopicExistsException;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+import org.testcontainers.shaded.com.google.common.collect.ImmutableMap;
+
+import static org.awaitility.Awaitility.await;
+
+public class KafkaUtils {
+    private static final Logger LOGGER = LogManager.getLogger(KafkaUtils.class);
+
+    /**
+     * Creates kafka topic
+     *
+     * @param topicName the topic name
+     * @param bootstrapServer kafka bootstrap server list
+     */
+    public static void createTopic(String topicName, String bootstrapServer) {
+        createTopic(topicName, 1, bootstrapServer);
+    }
+
+    public static void createTopic(String topicName, int numOfPartitions, String bootstrapServers) {
+        try {
+            getAdminClient(bootstrapServers, (client -> {
+                NewTopic newTopic = new NewTopic(topicName, numOfPartitions, (short) 1);
+                client.createTopics(List.of(newTopic));
+                return true;
+            }));
+
+        } catch (TopicExistsException e) {
+            // Catch TopicExistsException otherwise it will break maven-surefire-plugin
+            LOGGER.warn("Topic {} already existed", topicName);
+        }
+
+        // validates topic is created
+        await().atMost(3, TimeUnit.SECONDS).until(() -> checkTopicExistence(topicName, bootstrapServers));
+    }
+
+    public static boolean checkTopicExistence(String topicName, String bootstrapServers) {
+        return getAdminClient(bootstrapServers, (client -> {
+            Map<String, KafkaFuture<TopicDescription>> topics = client.describeTopics(List.of(topicName)).values();
+
+            try {
+                return topics.containsKey(topicName) && topics.get(topicName).get().name().equals(topicName);
+            } catch (InterruptedException e) {
+                LOGGER.error("error on checkTopicExistence", e);
+                return false;
+            } catch (ExecutionException e) {
+                LOGGER.error("error on checkTopicExistence", e);
+                return false;
+            }
+        }));
+    }
+
+    private static <Rep> Rep getAdminClient(String bootstrapServer, Function<AdminClient, Rep> function) {
+        AdminClient adminClient = KafkaAdminClient.create(
+            ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer, AdminClientConfig.CLIENT_ID_CONFIG, "test")
+        );
+        try {
+            return function.apply(adminClient);
+        } finally {
+            adminClient.close();
+        }
+    }
+}
diff --git a/server/build.gradle b/server/build.gradle
index 82eafb07a7ad3..e7541cfdccdff 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -113,7 +113,7 @@ dependencies {
 
   // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap
   api libs.roaringbitmap
-
+  testImplementation 'org.awaitility:awaitility:4.2.0'
   testImplementation(project(":test:framework")) {
     // tests use the locally compiled version of server
     exclude group: 'org.opensearch', module: 'server'
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index e09630d813ebf..4e605dce498ab 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -71,6 +71,7 @@
 import org.opensearch.index.IndexModule;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.seqno.SequenceNumbers;
+import org.opensearch.indices.pollingingest.StreamPoller;
 import org.opensearch.indices.replication.SegmentReplicationSource;
 import org.opensearch.indices.replication.common.ReplicationType;
 
@@ -656,6 +657,63 @@ public static APIBlock readFrom(StreamInput input) throws IOException {
         Property.IndexScope
     );
 
+    /**
+     * Used to specify the type for the ingestion index. If not specified, the ingestion source is not enabled.
+     */
+    public static final String SETTING_INGESTION_SOURCE_TYPE = "index.ingestion_source.type";
+    public static final String NONE_INGESTION_SOURCE_TYPE = "none";
+    public static final Setting<String> INGESTION_SOURCE_TYPE_SETTING = Setting.simpleString(
+        SETTING_INGESTION_SOURCE_TYPE,
+        NONE_INGESTION_SOURCE_TYPE,
+        new Setting.Validator<>() {
+
+            @Override
+            public void validate(final String value) {
+                // TODO: validate this with the registered types in the ingestion source plugin
+            }
+
+            @Override
+            public void validate(final String value, final Map<Setting<?>, Object> settings) {
+                // TODO: validate this with the ingestion source params
+            }
+        },
+        Property.IndexScope
+    );
+
+    /**
+     * Used to specify the initial reset policy for the ingestion pointer. If not specified, defaults to the latest.
+     */
+    public static final String SETTING_INGESTION_SOURCE_POINTER_INIT_RESET = "index.ingestion_source.pointer.init.reset";
+    public static final Setting<String> INGESTION_SOURCE_POINTER_INIT_RESET_SETTING = Setting.simpleString(
+        SETTING_INGESTION_SOURCE_POINTER_INIT_RESET,
+        StreamPoller.ResetState.LATEST.name(),
+        new Setting.Validator<>() {
+
+            @Override
+            public void validate(final String value) {
+                if (!(value.equalsIgnoreCase(StreamPoller.ResetState.LATEST.name())
+                    || value.equalsIgnoreCase(StreamPoller.ResetState.EARLIEST.name()))) {
+                    throw new IllegalArgumentException(
+                        "Invalid value for " + SETTING_INGESTION_SOURCE_POINTER_INIT_RESET + " [" + value + "]"
+                    );
+                }
+            }
+
+            @Override
+            public void validate(final String value, final Map<Setting<?>, Object> settings) {}
+        },
+        Property.IndexScope,
+        Property.Dynamic
+    );
+
+    public static final Setting.AffixSetting<Object> INGESTION_SOURCE_PARAMS_SETTING = Setting.prefixKeySetting(
+        "index.ingestion_source.param.",
+        key -> new Setting<>(key, "", (value) -> {
+            // TODO: add ingestion source params validation
+            return value;
+        }, Property.IndexScope)
+    );
+
     /**
      * an internal index format description, allowing us to find out if this index is upgraded or needs upgrading
      */
@@ -683,6 +741,7 @@ public static APIBlock readFrom(StreamInput input) throws IOException {
     public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store";
     public static final String TRANSLOG_METADATA_KEY = "translog_metadata";
     public static final String CONTEXT_KEY = "context";
+    public static final String INGESTION_SOURCE_KEY = "ingestion_source";
 
     public static final String INDEX_STATE_FILE_PREFIX = "state-";
 
@@ -863,6 +922,25 @@ public Version getCreationVersion() {
         return indexCreatedVersion;
     }
 
+    /**
+     * Gets the ingestion source.
+     * @return ingestion source, or null if ingestion source is not enabled
+     */
+    public IngestionSource getIngestionSource() {
+        final String ingestionSourceType = INGESTION_SOURCE_TYPE_SETTING.get(settings);
+        if (ingestionSourceType != null && !(NONE_INGESTION_SOURCE_TYPE.equals(ingestionSourceType))) {
+            final String pointerInitReset = INGESTION_SOURCE_POINTER_INIT_RESET_SETTING.get(settings);
+            final Map<String, Object> ingestionSourceParams = INGESTION_SOURCE_PARAMS_SETTING.getAsMap(settings);
+            return new IngestionSource(ingestionSourceType, pointerInitReset, ingestionSourceParams);
+        }
+        return null;
+    }
+
+    public boolean useIngestionSource() {
+        final String ingestionSourceType = INGESTION_SOURCE_TYPE_SETTING.get(settings);
+        return ingestionSourceType != null && !(NONE_INGESTION_SOURCE_TYPE.equals(ingestionSourceType));
+    }
+
     /**
      * Return the {@link Version} on which this index has been upgraded. This
      * information is typically useful for backward compatibility.
@@ -1209,6 +1287,7 @@ public IndexMetadata apply(IndexMetadata part) {
             builder.rolloverInfos.putAll(rolloverInfos.apply(part.rolloverInfos));
             builder.system(isSystem);
             builder.context(context);
+            // TODO: support ingestion source
             return builder.build();
         }
     }
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java
new file mode 100644
index 0000000000000..583114d9ecbd2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IngestionSource.java
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.cluster.metadata;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Class encapsulating the configuration of an ingestion source.
+ */
+@ExperimentalApi
+public class IngestionSource {
+    private String type;
+    private String pointerInitReset;
+    private Map<String, Object> params;
+
+    public IngestionSource(String type, String pointerInitReset, Map<String, Object> params) {
+        this.type = type;
+        this.pointerInitReset = pointerInitReset;
+        this.params = params;
+    }
+
+    public String getType() {
+        return type;
+    }
+
+    public String getPointerInitReset() {
+        return pointerInitReset;
+    }
+
+    public Map<String, Object> params() {
+        return params;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        IngestionSource ingestionSource = (IngestionSource) o;
+        return Objects.equals(type, ingestionSource.type)
+            && Objects.equals(pointerInitReset, ingestionSource.pointerInitReset)
+            && Objects.equals(params, ingestionSource.params);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(type, pointerInitReset, params);
+    }
+
+    @Override
+    public String toString() {
+        return "IngestionSource{" + "type='" + type + '\'' + ", pointer_init_reset='" + pointerInitReset + '\'' + ", params=" + params + '}';
+    }
+}
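Putting the new settings together: an ingestion index is declared entirely through index settings, and the engine later reads the parsed view back from the metadata. A sketch using the setting keys defined above (the Kafka values mirror the integration test earlier in this patch):

    Settings settings = Settings.builder()
        .put("index.ingestion_source.type", "kafka")
        .put("index.ingestion_source.pointer.init.reset", "earliest")
        .put("index.ingestion_source.param.topic", "test")
        .put("index.ingestion_source.param.bootstrap_servers", "localhost:9092")
        .build();

    // Later, on the engine side:
    IngestionSource source = indexMetadata.getIngestionSource();  // null when type is "none"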
diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
index 387ed0ed92680..b8ace8495ad96 100644
--- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
+++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java
@@ -257,6 +257,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
             IndexSettings.INDEX_CONTEXT_CREATED_VERSION,
             IndexSettings.INDEX_CONTEXT_CURRENT_VERSION,
 
+            // Settings for ingestion source
+            IndexMetadata.INGESTION_SOURCE_TYPE_SETTING,
+            IndexMetadata.INGESTION_SOURCE_POINTER_INIT_RESET_SETTING,
+            IndexMetadata.INGESTION_SOURCE_PARAMS_SETTING,
+
             // validate that built-in similarities don't get redefined
             Setting.groupSetting("index.similarity.", (s) -> {
                 Map<String, Settings> groups = s.getAsGroups();
diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java
index f1b36194bf62d..c623b0c63866c 100644
--- a/server/src/main/java/org/opensearch/index/IndexService.java
+++ b/server/src/main/java/org/opensearch/index/IndexService.java
@@ -308,8 +308,11 @@ public IndexService(
         // kick off async ops for the first shard in this index
         this.refreshTask = new AsyncRefreshTask(this);
         this.trimTranslogTask = new AsyncTrimTranslogTask(this);
-        this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this);
-        this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this);
+        // disable these checks for ingestion source engine
+        if (!indexSettings.getIndexMetadata().useIngestionSource()) {
+            this.globalCheckpointTask = new AsyncGlobalCheckpointTask(this);
+            this.retentionLeaseSyncTask = new AsyncRetentionLeaseSyncTask(this);
+        }
         if (READER_WRITER_SPLIT_EXPERIMENTAL_SETTING.get(indexSettings.getNodeSettings())) {
             this.asyncReplicationTask = new AsyncReplicationTask(this);
         }
diff --git a/server/src/main/java/org/opensearch/index/IngestionConsumerFactory.java b/server/src/main/java/org/opensearch/index/IngestionConsumerFactory.java
new file mode 100644
index 0000000000000..dec8596981024
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/IngestionConsumerFactory.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+
+import java.util.Map;
+
+/**
+ * A factory for creating {@link IngestionShardConsumer}.
+ *
+ * @param <T> the type of the {@link IngestionShardConsumer}
+ * @param <P> the type of the {@link IngestionShardPointer}
+ */
+@ExperimentalApi
+public interface IngestionConsumerFactory<T extends IngestionShardConsumer, P extends IngestionShardPointer> {
+    /**
+     * Initialize the factory with the configuration parameters. This method is called once when the factory is created,
+     * and the parameters are parsed from the {@link org.opensearch.cluster.metadata.IngestionSource} in
+     * {@link org.opensearch.cluster.metadata.IndexMetadata}.
+     * @param params the configuration parameters to initialize the factory
+     */
+    void initialize(Map<String, Object> params);
+
+    /**
+     * Create a consumer to ingest messages from a shard of the streams. When the ingestion engine is created per shard,
+     * this method is called to create the consumer in the poller. Before the invocation of this method, the configuration
+     * is passed to the factory through the {@link #initialize(Map)} method.
+     *
+     * @param clientId the client id assigned to the consumer
+     * @param shardId the id of the shard
+     * @return the created consumer
+     */
+    T createShardConsumer(String clientId, int shardId);
+
+    /**
+     * Parses the pointer from a string representation to the pointer object. This is used for recovering from the index
+     * checkpoints.
+     * @param pointer the string representation of the pointer
+     * @return the recovered pointer
+     */
+    P parsePointerFromString(String pointer);
+}
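The factory contract is: `initialize` is invoked once with the `param.*` map from the `IngestionSource`, then `createShardConsumer` once per shard, and `parsePointerFromString` whenever a checkpoint has to be recovered. A skeletal implementation for a hypothetical in-memory source (every `InMemory*` name here is illustrative, not part of this patch) shows the shape:

    public class InMemoryConsumerFactory implements IngestionConsumerFactory<InMemoryShardConsumer, InMemoryPointer> {
        private Map<String, Object> params;

        @Override
        public void initialize(Map<String, Object> params) {
            this.params = params; // parsed from index.ingestion_source.param.*
        }

        @Override
        public InMemoryShardConsumer createShardConsumer(String clientId, int shardId) {
            return new InMemoryShardConsumer(clientId, shardId, params);
        }

        @Override
        public InMemoryPointer parsePointerFromString(String pointer) {
            return new InMemoryPointer(Long.parseLong(pointer));
        }
    }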
diff --git a/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java
new file mode 100644
index 0000000000000..02a9f5a18ebb1
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/IngestionShardConsumer.java
@@ -0,0 +1,89 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+
+import java.io.Closeable;
+import java.util.List;
+
+/**
+ * A consumer for reading messages from an ingestion shard.
+ * @param <T> the type of the pointer to the message
+ * @param <M> the type of the message
+ */
+@ExperimentalApi
+public interface IngestionShardConsumer<T extends IngestionShardPointer, M extends Message> extends Closeable {
+
+    /**
+     * A read result containing the pointer and the message
+     * @param <T> the type of the pointer to the message
+     * @param <M> the type of the message
+     */
+    @ExperimentalApi
+    class ReadResult<T, M> {
+        T pointer;
+        M message;
+
+        /**
+         * Create a new read result
+         * @param pointer the pointer to the message
+         * @param message the message
+         */
+        public ReadResult(T pointer, M message) {
+            this.pointer = pointer;
+            this.message = message;
+        }
+
+        /**
+         * @return the pointer to the message
+         */
+        public T getPointer() {
+            return pointer;
+        }
+
+        /**
+         * @return the message
+         */
+        public M getMessage() {
+            return message;
+        }
+    }
+
+    /**
+     * Read the next set of messages from the source
+     * @param pointer the pointer to start reading from, inclusive
+     * @param maxMessages the maximum number of messages to read, or -1 for no limit
+     * @param timeoutMillis the maximum time to wait for messages
+     * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
+     * milliseconds
+     * @return a list of messages read from the source
+     */
+    List<ReadResult<T, M>> readNext(T pointer, long maxMessages, int timeoutMillis) throws java.util.concurrent.TimeoutException;
+
+    /**
+     * @return the next pointer to read from
+     */
+    T nextPointer();
+
+    /**
+     * @return the earliest pointer in the shard
+     */
+    IngestionShardPointer earliestPointer();
+
+    /**
+     * @return the latest pointer in the shard. The pointer points to the next offset of the last message in the stream.
+     */
+    IngestionShardPointer latestPointer();
+
+    /**
+     * @return the shard id
+     */
+    int getShardId();
+}
diff --git a/server/src/main/java/org/opensearch/index/IngestionShardPointer.java b/server/src/main/java/org/opensearch/index/IngestionShardPointer.java
new file mode 100644
index 0000000000000..fc9c14f1ca0f0
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/IngestionShardPointer.java
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index;
+
+import org.apache.lucene.document.Field;
+import org.apache.lucene.search.Query;
+import org.opensearch.common.annotation.ExperimentalApi;
+
+/**
+ * An interface for the pointer to a shard in the ingestion engine; it is used to track the message offset of
+ * ingestion.
+ */
+@ExperimentalApi
+public interface IngestionShardPointer extends Comparable<IngestionShardPointer> {
+    String OFFSET_FIELD = "_offset";
+
+    /**
+     * Serialize the pointer to a byte array.
+     * @return the serialized byte array
+     */
+    byte[] serialize();
+
+    /**
+     * Convert the pointer to a string.
+     * @return the string representation of the pointer
+     */
+    String asString();
+
+    /**
+     * Creates a point field for this pointer. This is used to store the pointer in the index for range search during
+     * checkpoint recovery.
+     * @param fieldName the field name to create the point field
+     * @return the point field
+     */
+    Field asPointField(String fieldName);
+
+    /**
+     * Create a new range query for values greater than the pointer. This is used in recovering from the ingestion
+     * checkpoints.
+     *
+     * @param fieldName the field name to create the range query
+     * @return query for values greater than the pointer
+     */
+    Query newRangeQueryGreaterThan(String fieldName);
+}
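`asPointField` and `newRangeQueryGreaterThan` work as a pair during checkpoint recovery: each ingested document stores its pointer in the `_offset` point field, and on restart the engine queries for pointers past the last committed batch start (the same pattern `fetchPersistedOffsets` uses in `IngestionEngine` below). In miniature, with `doc`, `searcher`, `pointer`, and `batchStart` as stand-ins:

    // At index time: persist the current pointer on the document.
    doc.add(pointer.asPointField(IngestionShardPointer.OFFSET_FIELD));

    // At recovery time: find everything at or past the batch start.
    Query persistedRange = batchStart.newRangeQueryGreaterThan(IngestionShardPointer.OFFSET_FIELD);
    TopDocs persisted = searcher.search(persistedRange, Integer.MAX_VALUE);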
diff --git a/server/src/main/java/org/opensearch/index/Message.java b/server/src/main/java/org/opensearch/index/Message.java
new file mode 100644
index 0000000000000..d2ac92c2cc2b5
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/Message.java
@@ -0,0 +1,19 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index;
+
+import org.opensearch.common.annotation.ExperimentalApi;
+
+/**
+ * A message ingested from the ingestion source that contains an index operation
+ */
+@ExperimentalApi
+public interface Message<T> {
+    T getPayload();
+}
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
index fe2ce470cda03..6540e69e7dfcd 100644
--- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
+++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java
@@ -53,6 +53,7 @@
 import org.opensearch.index.codec.CodecAliases;
 import org.opensearch.index.codec.CodecService;
 import org.opensearch.index.codec.CodecSettings;
+import org.opensearch.index.mapper.DocumentMapperForType;
 import org.opensearch.index.mapper.ParsedDocument;
 import org.opensearch.index.seqno.RetentionLeases;
 import org.opensearch.index.store.Store;
@@ -110,6 +111,7 @@ public final class EngineConfig {
     private final boolean isReadOnlyReplica;
     private final BooleanSupplier startedPrimarySupplier;
     private final Comparator<LeafReader> leafSorter;
+    private final Supplier<DocumentMapperForType> documentMapperForTypeSupplier;
 
     /**
      * A supplier of the outstanding retention leases. This is used during merged operations to determine which operations that have been
@@ -296,6 +298,7 @@ private EngineConfig(Builder builder) {
         this.startedPrimarySupplier = builder.startedPrimarySupplier;
         this.translogFactory = builder.translogFactory;
         this.leafSorter = builder.leafSorter;
+        this.documentMapperForTypeSupplier = builder.documentMapperForTypeSupplier;
     }
 
     /**
@@ -544,6 +547,10 @@ public TombstoneDocSupplier getTombstoneDocSupplier() {
         return tombstoneDocSupplier;
     }
 
+    public Supplier<DocumentMapperForType> getDocumentMapperForTypeSupplier() {
+        return documentMapperForTypeSupplier;
+    }
+
     public TranslogDeletionPolicyFactory getCustomTranslogDeletionPolicyFactory() {
         return translogDeletionPolicyFactory;
     }
@@ -589,6 +596,7 @@ public static class Builder {
         private boolean isReadOnlyReplica;
         private BooleanSupplier startedPrimarySupplier;
         private TranslogFactory translogFactory = new InternalTranslogFactory();
+        private Supplier<DocumentMapperForType> documentMapperForTypeSupplier;
         Comparator<LeafReader> leafSorter;
 
         public Builder shardId(ShardId shardId) {
@@ -701,6 +709,11 @@ public Builder tombstoneDocSupplier(TombstoneDocSupplier tombstoneDocSupplier) {
             return this;
         }
 
+        public Builder documentMapperForTypeSupplier(Supplier<DocumentMapperForType> documentMapperForTypeSupplier) {
+            this.documentMapperForTypeSupplier = documentMapperForTypeSupplier;
+            return this;
+        }
+
         public Builder translogDeletionPolicyFactory(TranslogDeletionPolicyFactory translogDeletionPolicyFactory) {
             this.translogDeletionPolicyFactory = translogDeletionPolicyFactory;
             return this;
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java
index 77e2f1c55201d..d892d6e95346c 100644
--- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java
+++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java
@@ -25,6 +25,7 @@
 import org.opensearch.index.codec.CodecService;
 import org.opensearch.index.codec.CodecServiceConfig;
 import org.opensearch.index.codec.CodecServiceFactory;
+import org.opensearch.index.mapper.DocumentMapperForType;
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.seqno.RetentionLeases;
 import org.opensearch.index.store.Store;
@@ -154,7 +155,8 @@ public EngineConfig newEngineConfig(
         boolean isReadOnlyReplica,
         BooleanSupplier startedPrimarySupplier,
         TranslogFactory translogFactory,
-        Comparator<LeafReader> leafSorter
+        Comparator<LeafReader> leafSorter,
+        Supplier<DocumentMapperForType> documentMapperForTypeSupplier
     ) {
         CodecService codecServiceToUse = codecService;
         if (codecService == null && this.codecServiceFactory != null) {
@@ -188,6 +190,7 @@ public EngineConfig newEngineConfig(
             .startedPrimarySupplier(startedPrimarySupplier)
             .translogFactory(translogFactory)
             .leafSorter(leafSorter)
+            .documentMapperForTypeSupplier(documentMapperForTypeSupplier)
             .build();
     }
diff --git a/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java
new file mode 100644
index 0000000000000..2a1206d0a6ef2
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/engine/IngestionEngine.java
@@ -0,0 +1,1015 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index.engine;
+
+import org.apache.lucene.analysis.standard.StandardAnalyzer;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.MergePolicy;
+import org.apache.lucene.index.SegmentCommitInfo;
+import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.ShuffleForcedMergePolicy;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.ReferenceManager;
+import org.apache.lucene.store.AlreadyClosedException;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.InfoStream;
+import org.opensearch.ExceptionsHelper;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.cluster.metadata.IngestionSource;
+import org.opensearch.common.Booleans;
+import org.opensearch.common.Nullable;
+import org.opensearch.common.SuppressForbidden;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.common.lucene.LoggerInfoStream;
+import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.lucene.index.OpenSearchDirectoryReader;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.common.util.concurrent.AbstractRunnable;
+import org.opensearch.common.util.concurrent.ReleasableLock;
+import org.opensearch.common.util.io.IOUtils;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.index.shard.ShardId;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.IngestionConsumerFactory;
+import org.opensearch.index.IngestionShardConsumer;
+import org.opensearch.index.IngestionShardPointer;
+import org.opensearch.index.mapper.DocumentMapperForType;
+import org.opensearch.index.mapper.IdFieldMapper;
+import org.opensearch.index.mapper.ParseContext;
+import org.opensearch.index.merge.MergeStats;
+import org.opensearch.index.merge.OnGoingMerge;
+import org.opensearch.index.seqno.SeqNoStats;
+import org.opensearch.index.shard.OpenSearchMergePolicy;
+import org.opensearch.index.translog.NoOpTranslogManager;
+import org.opensearch.index.translog.Translog;
+import org.opensearch.index.translog.TranslogCorruptedException;
+import org.opensearch.index.translog.TranslogManager;
+import org.opensearch.index.translog.TranslogStats;
+import org.opensearch.indices.pollingingest.DefaultStreamPoller;
+import org.opensearch.indices.pollingingest.StreamPoller;
+import org.opensearch.search.suggest.completion.CompletionStats;
+import org.opensearch.threadpool.ThreadPool;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.UnaryOperator;
+
+import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT;
+
+/**
+ * IngestionEngine is an engine that ingests data from a stream source.
+ */ +public class IngestionEngine extends Engine { + + private volatile SegmentInfos lastCommittedSegmentInfos; + private final CompletionStatsCache completionStatsCache; + private final IndexWriter indexWriter; + private final OpenSearchReaderManager internalReaderManager; + private final ExternalReaderManager externalReaderManager; + private final Lock flushLock = new ReentrantLock(); + private final ReentrantLock optimizeLock = new ReentrantLock(); + private final OpenSearchConcurrentMergeScheduler mergeScheduler; + private final AtomicBoolean shouldPeriodicallyFlushAfterBigMerge = new AtomicBoolean(false); + private final TranslogManager translogManager; + private final DocumentMapperForType documentMapperForType; + private final IngestionConsumerFactory ingestionConsumerFactory; + private StreamPoller streamPoller; + + /** + * UUID value that is updated every time the engine is force merged. + */ + @Nullable + private volatile String forceMergeUUID; +
+ public IngestionEngine(EngineConfig engineConfig, IngestionConsumerFactory ingestionConsumerFactory) { + super(engineConfig); + store.incRef(); + boolean success = false; + try { + this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); + IndexMetadata indexMetadata = engineConfig.getIndexSettings().getIndexMetadata(); + assert indexMetadata != null; + mergeScheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings()); + indexWriter = createWriter(); + externalReaderManager = createReaderManager(new InternalEngine.RefreshWarmerListener(logger, isClosed, engineConfig)); + internalReaderManager = externalReaderManager.internalReaderManager; + translogManager = new NoOpTranslogManager( + shardId, + readLock, + this::ensureOpen, + new TranslogStats(0, 0, 0, 0, 0), + EMPTY_TRANSLOG_SNAPSHOT + ); + documentMapperForType = engineConfig.getDocumentMapperForTypeSupplier().get(); + this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); + + success = true; + } catch (IOException | TranslogCorruptedException e) { + throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } finally { + if (!success) { + if (streamPoller != null) { + try { + streamPoller.close(); + } catch (IOException e) { + logger.error("failed to close stream poller", e); + throw new RuntimeException(e); + } + } + if (!isClosed.get()) { + // failure, we need to dec the store reference + store.decRef(); + } + } + } + } +
+ /** + * Starts the ingestion engine to pull data from the stream source.
+ */ + public void start() { + IndexMetadata indexMetadata = engineConfig.getIndexSettings().getIndexMetadata(); + assert indexMetadata != null; + IngestionSource ingestionSource = Objects.requireNonNull(indexMetadata.getIngestionSource()); + + // initialize the ingestion consumer factory + this.ingestionConsumerFactory.initialize(ingestionSource.params()); + String clientId = engineConfig.getIndexSettings().getNodeName() + + "-" + + engineConfig.getIndexSettings().getIndex().getName() + + "-" + + engineConfig.getShardId().getId(); + IngestionShardConsumer ingestionShardConsumer = this.ingestionConsumerFactory.createShardConsumer( + clientId, + engineConfig.getShardId().getId() + ); + logger.info("created ingestion consumer for shard [{}]", engineConfig.getShardId()); + + Map commitData = commitDataAsMap(); + StreamPoller.ResetState resetState = StreamPoller.ResetState.valueOf( + ingestionSource.getPointerInitReset().toUpperCase(Locale.ROOT) + ); + IngestionShardPointer startPointer = null; + Set persistedPointers = new HashSet<>(); + if (commitData.containsKey(StreamPoller.BATCH_START)) { + // try recovering from commit data + String batchStartStr = commitData.get(StreamPoller.BATCH_START); + startPointer = this.ingestionConsumerFactory.parsePointerFromString(batchStartStr); + try (Searcher searcher = acquireSearcher("restore_offset", SearcherScope.INTERNAL)) { + persistedPointers = fetchPersistedOffsets(Lucene.wrapAllDocsLive(searcher.getDirectoryReader()), startPointer); + logger.info("recovered persisted pointers: {}", persistedPointers); + } catch (IOException e) { + throw new EngineCreationFailureException(config().getShardId(), "failed to restore offset", e); + } + // reset to none so the poller will poll from the startPointer + resetState = StreamPoller.ResetState.NONE; + } + + streamPoller = new DefaultStreamPoller(startPointer, persistedPointers, ingestionShardConsumer, this, resetState); + streamPoller.start(); + } + + private IndexWriter createWriter() throws IOException { + try { + final IndexWriterConfig iwc = getIndexWriterConfig(); + return createWriter(store.directory(), iwc); + } catch (LockObtainFailedException ex) { + logger.warn("could not lock IndexWriter", ex); + throw ex; + } + } + + public DocumentMapperForType getDocumentMapperForType() { + return documentMapperForType; + } + + protected Set fetchPersistedOffsets(DirectoryReader directoryReader, IngestionShardPointer batchStart) + throws IOException { + final IndexSearcher searcher = new IndexSearcher(directoryReader); + searcher.setQueryCache(null); + var query = batchStart.newRangeQueryGreaterThan(IngestionShardPointer.OFFSET_FIELD); + + // Execute the search + var topDocs = searcher.search(query, Integer.MAX_VALUE); + Set result = new HashSet<>(); + var storedFields = searcher.getIndexReader().storedFields(); + for (var scoreDoc : topDocs.scoreDocs) { + var doc = storedFields.document(scoreDoc.doc); + String valueStr = doc.get(IngestionShardPointer.OFFSET_FIELD); + IngestionShardPointer value = ingestionConsumerFactory.parsePointerFromString(valueStr); + result.add(value); + } + + refresh("restore_offset", SearcherScope.INTERNAL, true); + return result; + } + + /** + * a copy of ExternalReaderManager from InternalEngine + */ + @SuppressForbidden(reason = "reference counting is required here") + static final class ExternalReaderManager extends ReferenceManager { + private final BiConsumer refreshListener; + private final OpenSearchReaderManager internalReaderManager; + private boolean isWarmedUp; // guarded by 
refreshLock + + ExternalReaderManager( + OpenSearchReaderManager internalReaderManager, + BiConsumer<OpenSearchDirectoryReader, OpenSearchDirectoryReader> refreshListener + ) throws IOException { + this.refreshListener = refreshListener; + this.internalReaderManager = internalReaderManager; + this.current = internalReaderManager.acquire(); // steal the reference without warming up + } +
+ @Override + protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException { + // we simply run a blocking refresh on the internal reference manager and then steal its reader + // it's a safe operation since we acquire the reader, which increments its reference, but then down the road + // steal it by calling incRef on the "stolen" reader + internalReaderManager.maybeRefreshBlocking(); + final OpenSearchDirectoryReader newReader = internalReaderManager.acquire(); + if (isWarmedUp == false || newReader != referenceToRefresh) { + boolean success = false; + try { + refreshListener.accept(newReader, isWarmedUp ? referenceToRefresh : null); + isWarmedUp = true; + success = true; + } finally { + if (success == false) { + internalReaderManager.release(newReader); + } + } + } + // nothing has changed - both ref managers share the same instance so we can use reference equality + if (referenceToRefresh == newReader) { + internalReaderManager.release(newReader); + return null; + } else { + return newReader; // steal the reference + } + } +
+ @Override + protected boolean tryIncRef(OpenSearchDirectoryReader reference) { + return reference.tryIncRef(); + } + + @Override + protected int getRefCount(OpenSearchDirectoryReader reference) { + return reference.getRefCount(); + } + + @Override + protected void decRef(OpenSearchDirectoryReader reference) throws IOException { + reference.decRef(); + } + } +
+ private ExternalReaderManager createReaderManager(InternalEngine.RefreshWarmerListener externalRefreshListener) throws EngineException { + boolean success = false; + OpenSearchReaderManager internalReaderManager = null; + try { + try { + final OpenSearchDirectoryReader directoryReader = OpenSearchDirectoryReader.wrap( + DirectoryReader.open(indexWriter), + shardId + ); + internalReaderManager = new OpenSearchReaderManager(directoryReader); + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); + success = true; + return externalReaderManager; + } catch (IOException e) { + maybeFailEngine("start", e); + try { + indexWriter.rollback(); + } catch (IOException inner) { // iw is closed below + e.addSuppressed(inner); + } + throw new EngineCreationFailureException(shardId, "failed to open reader on writer", e); + } + } finally { + if (success == false) { // release everything we created on a failure + IOUtils.closeWhileHandlingException(internalReaderManager, indexWriter); + } + } + } +
+ // pkg-private for testing + IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { + return new IndexWriter(directory, iwc); + } +
+ private IndexWriterConfig getIndexWriterConfig() { + final IndexWriterConfig iwc = new IndexWriterConfig(engineConfig.getAnalyzer()); + iwc.setCommitOnClose(false); // we by default don't commit on close + iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); + // with tests.verbose, lucene sets this up: plumb to align with filesystem stream + boolean verbose = false; + try { + verbose = Boolean.parseBoolean(System.getProperty("tests.verbose")); + }
catch (Exception ignore) {} + iwc.setInfoStream(verbose ? InfoStream.getDefault() : new LoggerInfoStream(logger)); + iwc.setMergeScheduler(mergeScheduler); + // configure the merge policy + MergePolicy mergePolicy = config().getMergePolicy(); + boolean shuffleForcedMerge = Booleans.parseBoolean(System.getProperty("opensearch.shuffle_forced_merge", Boolean.TRUE.toString())); + if (shuffleForcedMerge) { + // We wrap the merge policy for all indices even though it is mostly useful for time-based indices + // but there should be no overhead for other type of indices so it's simpler than adding a setting + // to enable it. + mergePolicy = new ShuffleForcedMergePolicy(mergePolicy); + } +
+ if (config().getIndexSettings().isMergeOnFlushEnabled()) { + final long maxFullFlushMergeWaitMillis = config().getIndexSettings().getMaxFullFlushMergeWaitTime().millis(); + if (maxFullFlushMergeWaitMillis > 0) { + iwc.setMaxFullFlushMergeWaitMillis(maxFullFlushMergeWaitMillis); + final Optional<UnaryOperator<MergePolicy>> mergeOnFlushPolicy = config().getIndexSettings().getMergeOnFlushPolicy(); + if (mergeOnFlushPolicy.isPresent()) { + mergePolicy = mergeOnFlushPolicy.get().apply(mergePolicy); + } + } + } else { + // Disable merge on refresh + iwc.setMaxFullFlushMergeWaitMillis(0); + } +
+ iwc.setCheckPendingFlushUpdate(config().getIndexSettings().isCheckPendingFlushEnabled()); + iwc.setMergePolicy(new OpenSearchMergePolicy(mergePolicy)); + iwc.setSimilarity(engineConfig.getSimilarity()); + iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().getMbFrac()); + iwc.setCodec(engineConfig.getCodec()); + iwc.setUseCompoundFile(engineConfig.useCompoundFile()); + if (config().getIndexSort() != null) { + iwc.setIndexSort(config().getIndexSort()); + } + if (config().getLeafSorter() != null) { + iwc.setLeafSorter(config().getLeafSorter()); // The default segment search order + } +
+ return iwc; + } +
+ @Override + public TranslogManager translogManager() { + // ingestion engine does not have translog + return translogManager; + } + + @Override + protected SegmentInfos getLastCommittedSegmentInfos() { + return lastCommittedSegmentInfos; + } + + @Override + protected SegmentInfos getLatestSegmentInfos() { + throw new UnsupportedOperationException(); + } + + @Override + public String getHistoryUUID() { + return loadHistoryUUID(lastCommittedSegmentInfos.userData); + } + + @Override + public long getWritingBytes() { + return 0; + } + + @Override + public CompletionStats completionStats(String...
fieldNamePatterns) { + return completionStatsCache.get(fieldNamePatterns); + } + + @Override + public long getIndexThrottleTimeInMillis() { + return 0; + } + + @Override + public boolean isThrottled() { + return false; + } + + @Override + public IndexResult index(Index index) throws IOException { + assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field(); + ensureOpen(); + final IndexResult indexResult; + indexResult = indexIntoLucene(index); + return indexResult; + } + + private IndexResult indexIntoLucene(Index index) throws IOException { + // todo: handle updates + addDocs(index.docs(), indexWriter); + return new IndexResult(index.version(), index.primaryTerm(), index.seqNo(), true); + } + + private void addDocs(final List docs, final IndexWriter indexWriter) throws IOException { + if (docs.size() > 1) { + indexWriter.addDocuments(docs); + } else { + indexWriter.addDocument(docs.get(0)); + } + } + + @Override + public DeleteResult delete(Delete delete) throws IOException { + return null; + } + + @Override + public NoOpResult noOp(NoOp noOp) throws IOException { + ensureOpen(); + NoOpResult noOpResult = new NoOpResult(noOp.primaryTerm(), noOp.seqNo()); + return noOpResult; + } + + @Override + public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { + return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); + } + + @Override + protected ReferenceManager getReferenceManager(SearcherScope scope) { + return externalReaderManager; + } + + @Override + public Closeable acquireHistoryRetentionLock() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean accurateCount + ) throws IOException { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNumber) throws IOException { + return 0; + } + + @Override + public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { + return false; + } + + @Override + public long getMinRetainedSeqNo() { + return 0; + } + + @Override + public long getPersistedLocalCheckpoint() { + return 0; + } + + @Override + public long getProcessedLocalCheckpoint() { + return 0; + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return null; + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return 0; + } + + @Override + public long getIndexBufferRAMBytesUsed() { + return 0; + } + + @Override + public List segments(boolean verbose) { + try (ReleasableLock lock = readLock.acquire()) { + Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); + + // fill in the merges flag + Set onGoingMerges = mergeScheduler.onGoingMerges(); + for (OnGoingMerge onGoingMerge : onGoingMerges) { + for (SegmentCommitInfo segmentInfoPerCommit : onGoingMerge.getMergedSegments()) { + for (Segment segment : segmentsArr) { + if (segment.getName().equals(segmentInfoPerCommit.info.name)) { + segment.mergeId = onGoingMerge.getId(); + break; + } + } + } + } + return Arrays.asList(segmentsArr); + } + } + + @Override + public void refresh(String source) throws EngineException { + refresh(source, SearcherScope.EXTERNAL, true); + } + + final boolean refresh(String source, SearcherScope scope, boolean block) throws EngineException { + boolean refreshed; + try { + // refresh does not need to hold 
readLock as ReferenceManager can handle correctly if the engine is closed in mid-way. + if (store.tryIncRef()) { + // increment the ref just to ensure nobody closes the store during a refresh + try { + // even though we maintain 2 managers we really do the heavy-lifting only once. + // the second refresh will only do the extra work we have to do for warming caches etc. + ReferenceManager referenceManager = getReferenceManager(scope); + // it is intentional that we never refresh both internal / external together + if (block) { + referenceManager.maybeRefreshBlocking(); + refreshed = true; + } else { + refreshed = referenceManager.maybeRefresh(); + } + } finally { + store.decRef(); + } + } else { + refreshed = false; + } + } catch (AlreadyClosedException e) { + failOnTragicEvent(e); + throw e; + } catch (Exception e) { + try { + failEngine("refresh failed source[" + source + "]", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new RefreshFailedEngineException(shardId, e); + } + // We check for pruning in each delete request, but we also prune here e.g. in case a delete burst comes in and then no more deletes + // for a long time: + maybePruneDeletes(); + // TODO: use OS merge scheduler + mergeScheduler.refreshConfig(); + return refreshed; + } + + @Override + public boolean maybeRefresh(String source) throws EngineException { + return refresh(source, SearcherScope.EXTERNAL, false); + } + + @Override + public void writeIndexingBuffer() throws EngineException { + refresh("write indexing buffer", SearcherScope.INTERNAL, false); + } + + @Override + public boolean shouldPeriodicallyFlush() { + return false; + } + + @Override + public void flush(boolean force, boolean waitIfOngoing) throws EngineException { + ensureOpen(); + if (force && waitIfOngoing == false) { + assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; + throw new IllegalArgumentException( + "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing + ); + } + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + if (flushLock.tryLock() == false) { + // if we can't get the lock right away we block if needed otherwise barf + if (waitIfOngoing == false) { + return; + } + logger.trace("waiting for in-flight flush to finish"); + flushLock.lock(); + logger.trace("acquired flush lock after blocking"); + } else { + logger.trace("acquired flush lock immediately"); + } + try { + // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, + // + // do we need to consider #3 and #4 as in InternalEngine? + // (3) the newly created commit points to a different translog generation (can free translog), + // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. + boolean hasUncommittedChanges = indexWriter.hasUncommittedChanges(); + if (hasUncommittedChanges || force) { + logger.trace("starting commit for flush;"); + + // TODO: do we need to close the latest commit as done in InternalEngine? + commitIndexWriter(indexWriter); + + logger.trace("finished commit for flush"); + + // a temporary debugging to investigate test failure - issue#32827. 
Remove when the issue is resolved + logger.debug("new commit on flush, hasUncommittedChanges:{}, force:{}", hasUncommittedChanges, force); + + // we need to refresh in order to clear older version values + refresh("version_table_flush", SearcherScope.INTERNAL, true); + } + } catch (FlushFailedEngineException ex) { + maybeFailEngine("flush", ex); + throw ex; + } catch (IOException e) { + throw new FlushFailedEngineException(shardId, e); + } finally { + flushLock.unlock(); + } + } + } +
+ /** + * Commits the specified index writer. + * + * @param writer the index writer to commit + */ + protected void commitIndexWriter(final IndexWriter writer) throws IOException { + try { + writer.setLiveCommitData(() -> { + /* + * The user data captures the stream poller's batch start pointer (and the force merge UUID, when present) + */ + final Map<String, String> commitData = new HashMap<>(2); + + commitData.put(StreamPoller.BATCH_START, streamPoller.getBatchStartPointer().asString()); + final String currentForceMergeUUID = forceMergeUUID; + if (currentForceMergeUUID != null) { + commitData.put(FORCE_MERGE_UUID_KEY, currentForceMergeUUID); + } + logger.trace("committing writer with commit data [{}]", commitData); + return commitData.entrySet().iterator(); + }); + writer.commit(); + } catch (final Exception ex) { + try { + failEngine("lucene commit failed", ex); + } catch (final Exception inner) { + ex.addSuppressed(inner); + } + throw ex; + } catch (final AssertionError e) { + /* + * If assertions are enabled, IndexWriter throws AssertionError on commit if any files don't exist, but tests that randomly + * throw FileNotFoundException or NoSuchFileException can also hit this. + */ + if (ExceptionsHelper.stackTrace(e).contains("org.apache.lucene.index.IndexWriter.filesExist")) { + final EngineException engineException = new EngineException(shardId, "failed to commit engine", e); + try { + failEngine("lucene commit failed", engineException); + } catch (final Exception inner) { + engineException.addSuppressed(inner); + } + throw engineException; + } else { + throw e; + } + } + } +
+ @Override + public MergeStats getMergeStats() { + return mergeScheduler.stats(); + } + + @Override + public void onSettingsChanged(TimeValue translogRetentionAge, ByteSizeValue translogRetentionSize, long softDeletesRetentionOps) { + mergeScheduler.refreshConfig(); + // TODO: do we need more? + } +
+ protected Map<String, String> commitDataAsMap() { + return commitDataAsMap(indexWriter); + } + + /** + * Gets the commit data from {@link IndexWriter} as a map. + */ + protected static Map<String, String> commitDataAsMap(final IndexWriter indexWriter) { + final Map<String, String> commitData = new HashMap<>(8); + for (Map.Entry<String, String> entry : indexWriter.getLiveCommitData()) { + commitData.put(entry.getKey(), entry.getValue()); + } + return commitData; + } +
+ @Override + public void forceMerge( + boolean flush, + int maxNumSegments, + boolean onlyExpungeDeletes, + boolean upgrade, + boolean upgradeOnlyAncientSegments, + String forceMergeUUID + ) throws EngineException, IOException { + /* + * We do NOT acquire the readlock here since we are waiting on the merges to finish + * that's fine since the IW.rollback should stop all the threads and trigger an IOException + * causing us to fail the forceMerge + * + * The way we implement upgrades is a bit hackish in the sense that we set an instance + * variable and that this setting will thus apply to the next forced merge that will be run.
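commitIndexWriter above persists the poller's batch start pointer in Lucene's commit user data so ingestion can resume from the right place after a restart. A self-contained sketch of that mechanism, assuming a Lucene classpath; the key "batch_start" mirrors StreamPoller.BATCH_START, and "42" stands in for a serialized shard pointer:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.ByteBuffersDirectory;

    import java.util.HashMap;
    import java.util.Map;

    public class CommitUserDataDemo {
        public static void main(String[] args) throws Exception {
            try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
                 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Map<String, String> commitData = new HashMap<>();
                commitData.put("batch_start", "42"); // stream pointer serialized as a string
                writer.setLiveCommitData(commitData.entrySet());
                writer.commit();
                // After a restart, the pointer is recovered from the last commit's user data.
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    System.out.println(reader.getIndexCommit().getUserData().get("batch_start")); // 42
                }
            }
        }
    }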
+ * This is ok because (1) this is the only place we call forceMerge, (2) we have a single + * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler + * syncs calls to findForcedMerges. + */ + assert indexWriter.getConfig().getMergePolicy() instanceof OpenSearchMergePolicy : "MergePolicy is " + + indexWriter.getConfig().getMergePolicy().getClass().getName(); + OpenSearchMergePolicy mp = (OpenSearchMergePolicy) indexWriter.getConfig().getMergePolicy(); + optimizeLock.lock(); + try { + ensureOpen(); + if (upgrade) { + logger.info("starting segment upgrade upgradeOnlyAncientSegments={}", upgradeOnlyAncientSegments); + mp.setUpgradeInProgress(true, upgradeOnlyAncientSegments); + } + store.incRef(); // increment the ref just to ensure nobody closes the store while we optimize + try { + if (onlyExpungeDeletes) { + assert upgrade == false; + indexWriter.forceMergeDeletes(true /* blocks and waits for merges*/); + } else if (maxNumSegments <= 0) { + assert upgrade == false; + indexWriter.maybeMerge(); + } else { + indexWriter.forceMerge(maxNumSegments, true /* blocks and waits for merges*/); + this.forceMergeUUID = forceMergeUUID; + } + if (flush) { + flush(false, true); + } + if (upgrade) { + logger.info("finished segment upgrade"); + } + } finally { + store.decRef(); + } + } catch (AlreadyClosedException ex) { + /* in this case we first check if the engine is still open. If so this exception is just fine + * and expected. We don't hold any locks while we block on forceMerge otherwise it would block + * closing the engine as well. If we are not closed we pass it on to failOnTragicEvent which ensures + * we are handling a tragic event exception here */ + ensureOpen(ex); + failOnTragicEvent(ex); + throw ex; + } catch (Exception e) { + try { + maybeFailEngine(FORCE_MERGE, e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw e; + } finally { + try { + // reset it just to make sure we reset it in a case of an error + mp.setUpgradeInProgress(false, false); + } finally { + optimizeLock.unlock(); + } + } + } +
+ @Override + public GatedCloseable<IndexCommit> acquireLastIndexCommit(boolean flushFirst) throws EngineException { + store.incRef(); + try { + var reader = getReferenceManager(SearcherScope.INTERNAL).acquire(); + return new GatedCloseable<>(reader.getIndexCommit(), () -> { + store.decRef(); + getReferenceManager(SearcherScope.INTERNAL).release(reader); + }); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +
+ @Override + public GatedCloseable<IndexCommit> acquireSafeIndexCommit() throws EngineException { + // TODO: do we need this? likely not + return acquireLastIndexCommit(false); + } + + @Override + public SafeCommitInfo getSafeCommitInfo() { + // TODO: do we need this?
+ return SafeCommitInfo.EMPTY; + } +
+ @Override + protected void closeNoLock(String reason, CountDownLatch closedLatch) { + if (isClosed.compareAndSet(false, true)) { + assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() + : "Either the write lock must be held or the engine must currently be failing itself"; + try { + try { + IOUtils.close(externalReaderManager, internalReaderManager); + } catch (Exception e) { + logger.warn("Failed to close ReaderManager", e); + } +
+ // no need to commit in this case! we snapshot before we close the shard, so the translog and everything else are synced + logger.trace("rollback indexWriter"); + try { + indexWriter.rollback(); + } catch (AlreadyClosedException ex) { + failOnTragicEvent(ex); + throw ex; + } + logger.trace("rollback indexWriter done"); + } catch (Exception e) { + logger.warn("failed to rollback writer on close", e); + } finally { + try { + store.decRef(); + logger.debug("engine closed [{}]", reason); + } finally { + closedLatch.countDown(); + } + } + } + } +
+ private boolean failOnTragicEvent(AlreadyClosedException ex) { + final boolean engineFailed; + // if we are already closed due to some tragic exception + // we need to fail the engine. it might have already been failed before + // but we are double-checking it's failed and closed + if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { + final Exception tragicException; + if (indexWriter.getTragicException() instanceof Exception) { + tragicException = (Exception) indexWriter.getTragicException(); + } else { + tragicException = new RuntimeException(indexWriter.getTragicException()); + } + failEngine("already closed by tragic event on the index writer", tragicException); + engineFailed = true; + } else if (failedEngine.get() == null && isClosed.get() == false) { // we are closed but the engine is not failed yet? + // this smells like a bug - we only expect ACE if we are in a fatal case i.e. either translog or IW is closed by + // a tragic event or has closed itself.
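Note that closeNoLock above rolls the writer back instead of committing, so anything not yet committed is discarded. A small stand-alone Lucene demo of what that implies (only the last commit survives a rollback):

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.ByteBuffersDirectory;

    public class RollbackDemo {
        public static void main(String[] args) throws Exception {
            try (ByteBuffersDirectory dir = new ByteBuffersDirectory()) {
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
                writer.addDocument(new Document());
                writer.commit();                    // one committed document
                writer.addDocument(new Document()); // uncommitted
                writer.rollback();                  // closes the writer and drops the uncommitted doc
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    System.out.println(reader.numDocs()); // 1
                }
            }
        }
    }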
if that is not the case we are in a buggy state and raise an assertion error + throw new AssertionError("Unexpected AlreadyClosedException", ex); + } else { + engineFailed = false; + } + return engineFailed; + } + + private final class EngineMergeScheduler extends OpenSearchConcurrentMergeScheduler { + private final AtomicInteger numMergesInFlight = new AtomicInteger(0); + private final AtomicBoolean isThrottling = new AtomicBoolean(); + + EngineMergeScheduler(ShardId shardId, IndexSettings indexSettings) { + super(shardId, indexSettings); + } + + @Override + public synchronized void beforeMerge(OnGoingMerge merge) { + int maxNumMerges = mergeScheduler.getMaxMergeCount(); + if (numMergesInFlight.incrementAndGet() > maxNumMerges) { + if (isThrottling.getAndSet(true) == false) { + logger.info("now throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); + activateThrottling(); + } + } + } + + @Override + public synchronized void afterMerge(OnGoingMerge merge) { + int maxNumMerges = mergeScheduler.getMaxMergeCount(); + if (numMergesInFlight.decrementAndGet() < maxNumMerges) { + if (isThrottling.getAndSet(false)) { + logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); + deactivateThrottling(); + } + } + if (indexWriter.hasPendingMerges() == false + && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) { + // NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer + // we deadlock on engine#close for instance. + engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + if (isClosed.get() == false) { + logger.warn("failed to flush after merge has finished"); + } + } + + @Override + protected void doRun() { + // if we have no pending merges and we are supposed to flush once merges have finished to + // free up transient disk usage of the (presumably biggish) segments that were just merged + flush(); + } + }); + } else if (merge.getTotalBytesSize() >= engineConfig.getIndexSettings().getFlushAfterMergeThresholdSize().getBytes()) { + // we hit a significant merge which would allow us to free up memory if we'd commit it hence on the next change + // we should execute a flush on the next operation if that's a flush after inactive or indexing a document. + // we could fork a thread and do it right away but we try to minimize forking and piggyback on outside events. + shouldPeriodicallyFlushAfterBigMerge.set(true); + } + } + + @Override + protected void handleMergeException(final Throwable exc) { + engineConfig.getThreadPool().generic().execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.debug("merge failure action rejected", e); + } + + @Override + protected void doRun() throws Exception { + /* + * We do this on another thread rather than the merge thread that we are initially called on so that we have complete + * confidence that the call stack does not contain catch statements that would cause the error that might be thrown + * here from being caught and never reaching the uncaught exception handler. 
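The EngineMergeScheduler callbacks above gate indexing throttling on an in-flight merge counter, using getAndSet so each transition fires exactly once even under concurrency. An illustrative stand-alone sketch of that counter pattern (not the OpenSearch scheduler itself):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.atomic.AtomicInteger;

    public class MergeThrottleDemo {
        private final AtomicInteger inFlight = new AtomicInteger();
        private final AtomicBoolean throttling = new AtomicBoolean();
        private final int maxMerges;

        MergeThrottleDemo(int maxMerges) {
            this.maxMerges = maxMerges;
        }

        void beforeMerge() {
            // getAndSet(true) returning false means this thread performed the transition
            if (inFlight.incrementAndGet() > maxMerges && throttling.getAndSet(true) == false) {
                System.out.println("activate throttling");
            }
        }

        void afterMerge() {
            if (inFlight.decrementAndGet() < maxMerges && throttling.getAndSet(false)) {
                System.out.println("deactivate throttling");
            }
        }

        public static void main(String[] args) {
            MergeThrottleDemo throttle = new MergeThrottleDemo(1);
            throttle.beforeMerge(); // 1 in flight, at the limit, no output
            throttle.beforeMerge(); // 2 in flight -> "activate throttling"
            throttle.afterMerge();  // 1 in flight, not yet below the limit
            throttle.afterMerge();  // 0 in flight -> "deactivate throttling"
        }
    }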
+ */ + failEngine(MERGE_FAILED, new MergePolicy.MergeException(exc)); + } + }); + } + } + + @Override + public void activateThrottling() { + // TODO: add this when we have a thread pool for indexing in parallel + } + + @Override + public void deactivateThrottling() { + // TODO: is this needed? + } + + @Override + public int fillSeqNoGaps(long primaryTerm) throws IOException { + // TODO: is this needed? + return 0; + } + + @Override + public void maybePruneDeletes() { + // no need to prune deletes in ingestion engine + } + + @Override + public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { + // TODO: is this needed? + } + + @Override + public long getMaxSeqNoOfUpdatesOrDeletes() { + // TODO: is this needed? + return 0; + } + + @Override + public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) { + // TODO: is this needed? + } + + @Override + public void close() throws IOException { + if (streamPoller != null) { + streamPoller.close(); + } + super.close(); + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java index 9071b0e7a1eb3..ac8e123e49204 100644 --- a/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NoOpEngine.java @@ -58,6 +58,8 @@ import java.util.Map; import java.util.function.Function; +import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; + /** * NoOpEngine is an engine implementation that does nothing but the bare minimum * required in order to have an engine. All attempts to do something (search, @@ -158,21 +160,7 @@ public DocsStats docStats() { */ public TranslogManager translogManager() { try { - return new NoOpTranslogManager(shardId, readLock, this::ensureOpen, this.translogStats, new Translog.Snapshot() { - @Override - public void close() {} - - @Override - public int totalOperations() { - return 0; - } - - @Override - public Translog.Operation next() { - return null; - } - - }) { + return new NoOpTranslogManager(shardId, readLock, this::ensureOpen, this.translogStats, EMPTY_TRANSLOG_SNAPSHOT) { /** * This implementation will trim existing translog files using a {@link TranslogDeletionPolicy} * that retains nothing but the last translog generation from safe commit. diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 7ff3145055df8..1852f2fa92b74 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -66,6 +66,8 @@ import java.util.function.BiFunction; import java.util.function.Function; +import static org.opensearch.index.translog.Translog.EMPTY_TRANSLOG_SNAPSHOT; + /** * A basic read-only engine that allows switching a shard to be true read-only temporarily or permanently. 
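handleMergeException above deliberately forks failure handling onto another thread pool: rethrowing on the merge thread could be swallowed by catch frames already on its call stack, while a fresh thread guarantees the error reaches the uncaught exception handler. A stand-alone sketch of that re-dispatch pattern:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class FailureDispatchDemo {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService generic = Executors.newSingleThreadExecutor();
            try {
                generic.execute(() -> {
                    // thrown on the worker, this reaches its uncaught exception handler
                    throw new RuntimeException("merge failed");
                });
            } catch (RuntimeException e) {
                // never entered: catch blocks on the submitting thread cannot see it
                System.out.println("caught on caller thread");
            }
            generic.shutdown();
            generic.awaitTermination(1, TimeUnit.SECONDS);
        }
    }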
* Note: this engine can be opened side-by-side with a read-write engine but will not reflect any changes made to the read-write @@ -150,20 +152,7 @@ public ReadOnlyEngine( completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); - translogManager = new NoOpTranslogManager(shardId, readLock, this::ensureOpen, this.translogStats, new Translog.Snapshot() { - @Override - public void close() {} - - @Override - public int totalOperations() { - return 0; - } - - @Override - public Translog.Operation next() { - return null; - } - }); + translogManager = new NoOpTranslogManager(shardId, readLock, this::ensureOpen, this.translogStats, EMPTY_TRANSLOG_SNAPSHOT); success = true; } finally { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 02f20504b07ba..f26e53967b873 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2515,8 +2515,10 @@ public void openEngineAndRecoverFromTranslog(boolean syncFromRemote) throws IOEx ); }; - // Do not load the global checkpoint if this is a remote snapshot index - if (indexSettings.isRemoteSnapshot() == false && indexSettings.isRemoteTranslogStoreEnabled() == false) { + // Do not load the global checkpoint if this is a remote snapshot index or using ingestion source + if (indexSettings.isRemoteSnapshot() == false + && indexSettings.isRemoteTranslogStoreEnabled() == false + && !indexSettings.getIndexMetadata().useIngestionSource()) { loadGlobalCheckpointToReplicationTracker(); } @@ -2635,6 +2637,9 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b // time elapses after the engine is created above (pulling the config settings) until we set the engine reference, during // which settings changes could possibly have happened, so here we forcefully push any config changes to the new engine. onSettingsChanged(); + if (indexSettings.getIndexMetadata().useIngestionSource()) { + return; + } assert assertSequenceNumbersInCommit(); recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); } @@ -4071,8 +4076,9 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro isReadOnlyReplica, this::enableUploadToRemoteTranslog, translogFactorySupplier.apply(indexSettings, shardRouting), - isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null // DESC @timestamp default order for + isTimeSeriesDescSortOptimizationEnabled() ? DataStream.TIMESERIES_LEAF_SORTER : null, // DESC @timestamp default order for // timeseries + () -> docMapper() ); } diff --git a/server/src/main/java/org/opensearch/index/translog/Translog.java b/server/src/main/java/org/opensearch/index/translog/Translog.java index 4b4ceb7444471..ffda06d8d8292 100644 --- a/server/src/main/java/org/opensearch/index/translog/Translog.java +++ b/server/src/main/java/org/opensearch/index/translog/Translog.java @@ -959,6 +959,21 @@ public int hashCode() { } } + public static final Translog.Snapshot EMPTY_TRANSLOG_SNAPSHOT = new Translog.Snapshot() { + @Override + public void close() {} + + @Override + public int totalOperations() { + return 0; + } + + @Override + public Translog.Operation next() { + return null; + } + }; + /** * A snapshot of the transaction log, allows to iterate over all the transaction log operations. 
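The EMPTY_TRANSLOG_SNAPSHOT constant added to Translog above replaces the duplicated anonymous snapshots previously inlined in NoOpEngine and ReadOnlyEngine, and the ingestion engine reuses it as well. A sketch of how any snapshot is consumed, assuming the surrounding class imports Translog and java.io.IOException; the loop exits immediately for the empty snapshot because next() returns null:

    // Hypothetical helper, not part of the patch: drains a snapshot the way callers do.
    static int drain(Translog.Snapshot snapshot) throws IOException {
        int seen = 0;
        try (snapshot) {
            Translog.Operation op;
            while ((op = snapshot.next()) != null) {
                seen++; // process op
            }
        }
        return seen; // 0 for EMPTY_TRANSLOG_SNAPSHOT
    }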
* diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 67fab720d95dd..fdec2cebd2863 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -105,6 +105,7 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; +import org.opensearch.index.IngestionConsumerFactory; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.cache.request.ShardRequestCache; import org.opensearch.index.compositeindex.CompositeIndexSettings; @@ -147,6 +148,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.pollingingest.IngestionEngineFactory; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoverySettings; @@ -343,6 +345,7 @@ public class IndicesService extends AbstractLifecycleComponent private final MetaStateService metaStateService; private final Collection>> engineFactoryProviders; private final Map directoryFactories; + private final Map ingestionConsumerFactories; private final Map recoveryStateFactories; final AbstractRefCounted indicesRefCount; // pkg-private for testing private final CountDownLatch closeLatch = new CountDownLatch(1); @@ -395,6 +398,7 @@ public IndicesService( Supplier repositoriesServiceSupplier, SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + Map ingestionConsumerFactories, RecoverySettings recoverySettings, CacheService cacheService, RemoteStoreSettings remoteStoreSettings, @@ -452,6 +456,7 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; + this.ingestionConsumerFactories = ingestionConsumerFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests // that we need to wait for before closing some resources such as the caches. 
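IndicesService now carries a type-keyed map of ingestion consumer factories, and the getIngestionConsumerFactory method just below resolves one from the index's ingestion source type. A minimal stand-alone sketch of that registry lookup; ConsumerFactoryRegistry and its type parameter are hypothetical names:

    import java.util.Locale;
    import java.util.Map;

    public class ConsumerFactoryRegistry<F> {
        private final Map<String, F> factories;

        ConsumerFactoryRegistry(Map<String, F> factories) {
            this.factories = factories;
        }

        F forType(String type) {
            String key = type.toUpperCase(Locale.ROOT); // types are registered upper-cased
            F factory = factories.get(key);
            if (factory == null) {
                throw new IllegalArgumentException("No factory found for ingestion source type [" + key + "]");
            }
            return factory;
        }

        public static void main(String[] args) {
            ConsumerFactoryRegistry<Object> registry = new ConsumerFactoryRegistry<>(Map.of("KAFKA", new Object()));
            System.out.println(registry.forType("kafka") != null); // true, lookup is case-insensitive
        }
    }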
In order to // avoid closing these resources while ongoing requests are still being processed, we use a @@ -540,6 +545,7 @@ public IndicesService( Supplier repositoriesServiceSupplier, SearchRequestStats searchRequestStats, @Nullable RemoteStoreStatsTrackerFactory remoteStoreStatsTrackerFactory, + Map ingestionConsumerFactories, RecoverySettings recoverySettings, CacheService cacheService, RemoteStoreSettings remoteStoreSettings @@ -569,6 +575,7 @@ public IndicesService( repositoriesServiceSupplier, searchRequestStats, remoteStoreStatsTrackerFactory, + ingestionConsumerFactories, recoverySettings, cacheService, remoteStoreSettings, @@ -999,6 +1006,21 @@ private EngineConfigFactory getEngineConfigFactory(final IndexSettings idxSettin return new EngineConfigFactory(this.pluginsService, idxSettings); } + private IngestionConsumerFactory getIngestionConsumerFactory(final IndexSettings idxSettings) { + final IndexMetadata indexMetadata = idxSettings.getIndexMetadata(); + if (indexMetadata == null) { + return null; + } + if (indexMetadata.useIngestionSource()) { + String type = indexMetadata.getIngestionSource().getType().toUpperCase(Locale.ROOT); + if (!ingestionConsumerFactories.containsKey(type)) { + throw new IllegalArgumentException("No factory found for ingestion source type [" + type + "]"); + } + return ingestionConsumerFactories.get(type); + } + return null; + } + private EngineFactory getEngineFactory(final IndexSettings idxSettings) { final IndexMetadata indexMetadata = idxSettings.getIndexMetadata(); if (indexMetadata != null && indexMetadata.getState() == IndexMetadata.State.CLOSE) { @@ -1006,6 +1028,12 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { return NoOpEngine::new; } + // streaming ingestion + if (indexMetadata != null && indexMetadata.useIngestionSource()) { + IngestionConsumerFactory ingestionConsumerFactory = getIngestionConsumerFactory(idxSettings); + return new IngestionEngineFactory(ingestionConsumerFactory); + } + final List> engineFactories = engineFactoryProviders.stream() .map(engineFactoryProvider -> engineFactoryProvider.apply(idxSettings)) .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java new file mode 100644 index 0000000000000..b5c1db999544a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/DefaultStreamPoller.java @@ -0,0 +1,286 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.Nullable; +import org.opensearch.index.IngestionShardConsumer; +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.Message; +import org.opensearch.index.engine.IngestionEngine; + +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + * Default implementation of {@link StreamPoller} + */ +public class DefaultStreamPoller implements StreamPoller { + private static final Logger logger = LogManager.getLogger(DefaultStreamPoller.class); + + // TODO: make this configurable + public static final long MAX_POLL_SIZE = 1000; + public static final int POLL_TIMEOUT = 1000; + + private volatile State state = State.NONE; + + // goal state + private volatile boolean started; + private volatile boolean closed; + private volatile boolean paused; + + private IngestionShardConsumer consumer; + + private ExecutorService consumerThread; + + private ExecutorService processorThread; + + // start of the batch, inclusive + private IngestionShardPointer batchStartPointer; + + private ResetState resetState; + + private Set persistedPointers; + + private BlockingQueue> blockingQueue; + + private MessageProcessorRunnable processorRunnable; + + // A pointer to the max persisted pointer for optimizing the check + @Nullable + private IngestionShardPointer maxPersistedPointer; + + public DefaultStreamPoller( + IngestionShardPointer startPointer, + Set persistedPointers, + IngestionShardConsumer consumer, + IngestionEngine ingestionEngine, + ResetState resetState + ) { + this( + startPointer, + persistedPointers, + consumer, + new MessageProcessorRunnable(new ArrayBlockingQueue<>(100), ingestionEngine), + resetState + ); + } + + DefaultStreamPoller( + IngestionShardPointer startPointer, + Set persistedPointers, + IngestionShardConsumer consumer, + MessageProcessorRunnable processorRunnable, + ResetState resetState + ) { + this.consumer = Objects.requireNonNull(consumer); + this.resetState = resetState; + batchStartPointer = startPointer; + this.persistedPointers = persistedPointers; + if (!this.persistedPointers.isEmpty()) { + maxPersistedPointer = this.persistedPointers.stream().max(IngestionShardPointer::compareTo).get(); + } + this.processorRunnable = processorRunnable; + blockingQueue = processorRunnable.getBlockingQueue(); + this.consumerThread = Executors.newSingleThreadExecutor( + r -> new Thread( + r, + String.format(Locale.ROOT, "stream-poller-consumer-%d-%d", consumer.getShardId(), System.currentTimeMillis()) + ) + ); + + // TODO: allow multiple threads for processing the messages in parallel + this.processorThread = Executors.newSingleThreadExecutor( + r -> new Thread( + r, + String.format(Locale.ROOT, "stream-poller-processor-%d-%d", consumer.getShardId(), System.currentTimeMillis()) + ) + ); + } + + @Override + public void start() { + if (closed) { + throw new RuntimeException("poller is closed!"); + } + started = true; + consumerThread.submit(this::startPoll); + processorThread.submit(processorRunnable); + } + + /** + * Start the poller. 
Visible for testing. + */ + protected void startPoll() { + if (!started) { + throw new IllegalStateException("poller is not started!"); + } + if (closed) { + throw new IllegalStateException("poller is closed!"); + } + logger.info("Starting poller for shard {}", consumer.getShardId()); +
+ while (true) { + try { + if (closed) { + state = State.CLOSED; + break; + } +
+ // reset the offset + if (resetState != ResetState.NONE) { + switch (resetState) { + case EARLIEST: + batchStartPointer = consumer.earliestPointer(); + logger.info("Resetting offset by seeking to earliest offset {}", batchStartPointer.asString()); + break; + case LATEST: + batchStartPointer = consumer.latestPointer(); + logger.info("Resetting offset by seeking to latest offset {}", batchStartPointer.asString()); + break; + } + resetState = ResetState.NONE; + } +
+ if (paused) { + state = State.PAUSED; + try { + // TODO: make sleep time configurable + Thread.sleep(100); + } catch (Throwable e) { + logger.error("Error in pausing the poller of shard {}", consumer.getShardId(), e); + } + continue; + } +
+ state = State.POLLING; +
+ List> results = consumer.readNext( + batchStartPointer, + MAX_POLL_SIZE, + POLL_TIMEOUT + ); +
+ if (results.isEmpty()) { + // no new records + continue; + } +
+ state = State.PROCESSING; + // process the records + for (IngestionShardConsumer.ReadResult result : results) { + // check if the message is already processed + if (isProcessed(result.getPointer())) { + logger.info("Skipping message with pointer {} as it is already processed", result.getPointer().asString()); + continue; + } + blockingQueue.put(result); + logger.debug( + "Put message {} with pointer {} to the blocking queue", + String.valueOf(result.getMessage().getPayload()), + result.getPointer().asString() + ); + } + // update the batch start pointer to the next batch + batchStartPointer = consumer.nextPointer(); + } catch (Throwable e) { + // TODO better error handling + logger.error("Error in polling the shard {}", consumer.getShardId(), e); + } + } + } +
+ private boolean isProcessed(IngestionShardPointer pointer) { + if (maxPersistedPointer == null) { + return false; + } + if (pointer.compareTo(maxPersistedPointer) > 0) { + return false; + } + return persistedPointers.contains(pointer); + } +
+ /** + * Visible for testing.
Get the max persisted pointer + * @return the max persisted pointer + */ + protected IngestionShardPointer getMaxPersistedPointer() { + return maxPersistedPointer; + } + + @Override + public void pause() { + if (closed) { + throw new RuntimeException("consumer is closed!"); + } + paused = true; + } + + @Override + public void resume() { + if (closed) { + throw new RuntimeException("consumer is closed!"); + } + paused = false; + } + + @Override + public void close() { + closed = true; + if (!started) { + logger.info("consumer thread not started"); + return; + } + long startTime = System.currentTimeMillis(); // Record the start time + long timeout = 5000; + while (state != State.CLOSED) { + // Check if the timeout has been reached + if (System.currentTimeMillis() - startTime > timeout) { + logger.error("Timeout reached while waiting for shard {} to close", consumer.getShardId()); + break; // Exit the loop if the timeout is reached + } + try { + Thread.sleep(100); + } catch (Throwable e) { + logger.error("Error in closing the poller of shard {}: {}", consumer.getShardId(), e); + } + } + blockingQueue.clear(); + consumerThread.shutdown(); + // interrupts the processor + processorThread.shutdownNow(); + logger.info("closed the poller of shard {}", consumer.getShardId()); + } + + @Override + public boolean isPaused() { + return paused; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public IngestionShardPointer getBatchStartPointer() { + return batchStartPointer; + } + + public State getState() { + return state; + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java new file mode 100644 index 0000000000000..e124adb90365b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/IngestionEngineFactory.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.index.IngestionConsumerFactory; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.EngineConfig; +import org.opensearch.index.engine.EngineFactory; +import org.opensearch.index.engine.IngestionEngine; + +import java.util.Objects; + +/** + * Engine Factory implementation used with streaming ingestion. 
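The recovery path in the engine (fetchPersistedOffsets) together with the isProcessed check above relies on each ingested document carrying its stream offset twice: as a point field so a greater-than range query works on recovery, and as a stored field so each persisted offset can be read back. A self-contained Lucene sketch of that bookkeeping, using a hypothetical "_offset" field name in place of IngestionShardPointer.OFFSET_FIELD and plain longs in place of shard pointers:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.LongPoint;
    import org.apache.lucene.document.StoredField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.store.ByteBuffersDirectory;

    public class OffsetRecoveryDemo {
        public static void main(String[] args) throws Exception {
            try (ByteBuffersDirectory dir = new ByteBuffersDirectory();
                 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                for (long offset = 0; offset < 5; offset++) {
                    Document doc = new Document();
                    doc.add(new LongPoint("_offset", offset));                    // range-searchable
                    doc.add(new StoredField("_offset", Long.toString(offset)));   // retrievable
                    writer.addDocument(doc);
                }
                writer.commit();
                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    IndexSearcher searcher = new IndexSearcher(reader);
                    // offsets strictly greater than the recovered batch start (2 here)
                    TopDocs hits = searcher.search(LongPoint.newRangeQuery("_offset", 3, Long.MAX_VALUE), Integer.MAX_VALUE);
                    for (var scoreDoc : hits.scoreDocs) {
                        System.out.println(reader.storedFields().document(scoreDoc.doc).get("_offset")); // 3, 4
                    }
                }
            }
        }
    }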
+ */ +public class IngestionEngineFactory implements EngineFactory { + + private final IngestionConsumerFactory ingestionConsumerFactory; + + public IngestionEngineFactory(IngestionConsumerFactory ingestionConsumerFactory) { + this.ingestionConsumerFactory = Objects.requireNonNull(ingestionConsumerFactory); + } + + @Override + public Engine newReadWriteEngine(EngineConfig config) { + IngestionEngine ingestionEngine = new IngestionEngine(config, ingestionConsumerFactory); + ingestionEngine.start(); + return ingestionEngine; + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java new file mode 100644 index 0000000000000..53f9353477869 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/MessageProcessorRunnable.java @@ -0,0 +1,236 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.index.Term; +import org.opensearch.action.DocWriteRequest; +import org.opensearch.common.lucene.uid.Versions; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.core.xcontent.MediaTypeRegistry; +import org.opensearch.index.IngestionShardConsumer; +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.Message; +import org.opensearch.index.VersionType; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.mapper.IdFieldMapper; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.mapper.SourceToParse; +import org.opensearch.index.mapper.Uid; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; + +/** + * A class to process messages from the ingestion stream. It extracts the payload from the message and creates an + * engine operation. + */ +public class MessageProcessorRunnable implements Runnable { + private static final Logger logger = LogManager.getLogger(MessageProcessorRunnable.class); + + private final BlockingQueue> blockingQueue; + private final MessageProcessor messageProcessor; + + private static final String ID = "_id"; + private static final String OP_TYPE = "_op_type"; + private static final String SOURCE = "_source"; + + /** + * Constructor. + * + * @param blockingQueue the blocking queue to poll messages from + * @param engine the ingestion engine + */ + public MessageProcessorRunnable( + BlockingQueue> blockingQueue, + IngestionEngine engine + ) { + this(blockingQueue, new MessageProcessor(engine)); + } + + /** + * Constructor visible for testing. 
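MessageProcessorRunnable is the consuming half of the poller/processor pair: the poller thread puts read results onto a bounded ArrayBlockingQueue (capacity 100 above, giving natural backpressure), and this runnable polls with a timeout so interruption is noticed promptly. A stand-alone sketch of that handoff, using plain strings in place of ReadResult:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class QueueHandoffDemo {
        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<String> queue = new ArrayBlockingQueue<>(100);

            Thread processor = new Thread(() -> {
                while (!Thread.currentThread().isInterrupted()) {
                    try {
                        String msg = queue.poll(1000, TimeUnit.MILLISECONDS);
                        if (msg != null) {
                            System.out.println("processed " + msg);
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt(); // restore status and exit the loop
                    }
                }
            }, "processor");
            processor.start();

            queue.put("doc-1"); // the producing side blocks if the queue is full (backpressure)
            queue.put("doc-2");
            Thread.sleep(200);
            processor.interrupt(); // mirrors processorThread.shutdownNow()
            processor.join();
        }
    }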
+ * @param blockingQueue the blocking queue to poll messages from + * @param messageProcessor the message processor + */ + MessageProcessorRunnable( + BlockingQueue> blockingQueue, + MessageProcessor messageProcessor + ) { + this.blockingQueue = Objects.requireNonNull(blockingQueue); + this.messageProcessor = messageProcessor; + } +
+ static class MessageProcessor { + private final IngestionEngine engine; + private final String index; +
+ MessageProcessor(IngestionEngine engine) { + this(engine, engine.config().getIndexSettings().getIndex().getName()); + } +
+ /** + * visible for testing + * @param engine the ingestion engine + * @param index the index name + */ + MessageProcessor(IngestionEngine engine, String index) { + this.engine = engine; + this.index = index; + } +
+ /** + * Visible for testing. Processes the message and creates an engine operation. It also records the offset in the document as (1) a point + * field used for range search, (2) a stored field for retrieval. + * + * @param message the message to process + * @param pointer the pointer to the message + */ + protected void process(Message message, IngestionShardPointer pointer) { + byte[] payload = (byte[]) message.getPayload(); +
+ try { + Engine.Operation operation = getOperation(payload, pointer); + if (operation == null) { + // the message was skipped (e.g. unsupported op type or malformed source) + return; + } + switch (operation.operationType()) { + case INDEX: + engine.index((Engine.Index) operation); + break; + case DELETE: + engine.delete((Engine.Delete) operation); + break; + default: + throw new IllegalArgumentException("Invalid operation: " + operation); + } + } catch (IOException e) { + logger.error("Failed to process operation from message {} at pointer {}", message, pointer, e); + throw new RuntimeException(e); + } + } +
+ /** + * Visible for testing. Get the engine operation from the message.
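getOperation, whose javadoc continues below, dispatches on the optional _op_type key and requires _source to be a map for index operations. A stand-alone sketch of the payload shape it accepts, written as a literal map standing in for the JSON bytes a consumer would actually deliver:

    import java.util.Map;

    public class PayloadShapeDemo {
        public static void main(String[] args) {
            Map<String, Object> payload = Map.of(
                "_id", "doc-1",                       // document id; processing substitutes a default when absent
                "_op_type", "index",                  // or "delete"; defaults to "index" when absent
                "_source", Map.of("field", "value")   // must be a map for index operations
            );
            String opType = (String) payload.getOrDefault("_op_type", "index");
            System.out.println(opType + " -> " + payload.get("_source"));
        }
    }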
+ * @param payload the payload of the message + * @param pointer the pointer to the message + * @return the engine operation + */ + protected Engine.Operation getOperation(byte[] payload, IngestionShardPointer pointer) throws IOException { + BytesReference payloadBR = new BytesArray(payload); + Map payloadMap = XContentHelper.convertToMap(payloadBR, false, MediaTypeRegistry.xContentType(payloadBR)).v2(); + + String id = (String) payloadMap.getOrDefault(ID, "null"); + if (payloadMap.containsKey(OP_TYPE) && !(payloadMap.get(OP_TYPE) instanceof String)) { + // TODO: add metric + logger.error("_op_type field is of type {} but not string, skipping the message", payloadMap.get(OP_TYPE).getClass()); + return null; + } + String opTypeString = (String) payloadMap.getOrDefault(OP_TYPE, "index"); + DocWriteRequest.OpType opType = DocWriteRequest.OpType.fromString(opTypeString); + + Engine.Operation operation; + switch (opType) { + case INDEX: + if (!payloadMap.containsKey(SOURCE)) { + // TODO: add metric + logger.error("missing _source field, skipping the message"); + return null; + } + if (!(payloadMap.get(SOURCE) instanceof Map)) { + // TODO: add metric + logger.error("_source field does not contain a map, skipping the message"); + return null; + } + BytesReference source = convertToBytes(payloadMap.get(SOURCE)); + + SourceToParse sourceToParse = new SourceToParse(index, id, source, MediaTypeRegistry.xContentType(source), null); + // TODO: handle parsing err + ParsedDocument doc = engine.getDocumentMapperForType().getDocumentMapper().parse(sourceToParse); + ParseContext.Document document = doc.rootDoc(); + // set the offset as the offset field + document.add(pointer.asPointField(IngestionShardPointer.OFFSET_FIELD)); + // store the offset as string in stored field + document.add(new StoredField(IngestionShardPointer.OFFSET_FIELD, pointer.asString())); + + operation = new Engine.Index( + new Term("_id", id), + doc, + 0, + 1, + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + System.nanoTime(), + System.currentTimeMillis(), + false, + UNASSIGNED_SEQ_NO, + 0 + ); + break; + case DELETE: + operation = new Engine.Delete( + id, + new Term(IdFieldMapper.NAME, Uid.encodeId(id)), + 0, + 1, + Versions.MATCH_ANY, + VersionType.INTERNAL, + Engine.Operation.Origin.PRIMARY, + System.nanoTime(), + UNASSIGNED_SEQ_NO, + 0 + ); + break; + default: + logger.error("Unsupported operation type {}", opType); + return null; + } + + return operation; + } + } + + private static BytesReference convertToBytes(Object object) throws IOException { + assert object instanceof Map; + return BytesReference.bytes(XContentFactory.jsonBuilder().map((Map) object)); + } + + BlockingQueue> getBlockingQueue() { + return blockingQueue; + } + + @Override + public void run() { + while (!(Thread.currentThread().isInterrupted())) { + IngestionShardConsumer.ReadResult result = null; + try { + result = blockingQueue.poll(1000, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + // TODO: add metric + logger.debug("MessageProcessorRunnable poll interruptedException", e); + Thread.currentThread().interrupt(); // Restore interrupt status + } + if (result != null) { + messageProcessor.process(result.getMessage(), result.getPointer()); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java new file mode 100644 index 0000000000000..f674f6dc55c85 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/indices/pollingingest/StreamPoller.java @@ -0,0 +1,71 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.index.IngestionShardPointer; + +import java.io.Closeable; + +/** + * A poller for reading messages from an ingestion shard. This is used in the ingestion engine. + */ +public interface StreamPoller extends Closeable { + + String BATCH_START = "batch_start"; + + /** + * Start the poller + */ + void start(); + + /** + * Pause the poller + */ + void pause(); + + /** + * Resume the poller + */ + void resume(); + + /** + * @return true if the poller is paused + */ + boolean isPaused(); + + /** + * check if the poller is closed + */ + boolean isClosed(); + + /** + * get the pointer to the start of the current batch of messages. + */ + IngestionShardPointer getBatchStartPointer(); + + /** + * a state to indicate the current state of the poller + */ + enum State { + NONE, + CLOSED, + PAUSED, + POLLING, + PROCESSING, + } + + /** + * a reset state to indicate how to reset the pointer + */ + enum ResetState { + EARLIEST, + LATEST, + NONE, + } +} diff --git a/server/src/main/java/org/opensearch/indices/pollingingest/package-info.java b/server/src/main/java/org/opensearch/indices/pollingingest/package-info.java new file mode 100644 index 0000000000000..1c3d860bf2559 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/pollingingest/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Indices ingestion module package. + */ +package org.opensearch.indices.pollingingest;
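The StreamPoller contract above is deliberately small: lifecycle control, state inspection, and a batch-start pointer for recovery. As a rough illustration of the state machine it implies, here is a hedged sketch (not code from this patch; the NoopStreamPoller name and its volatile-state bookkeeping are assumptions):

// Hedged sketch: a no-op StreamPoller that models only the lifecycle state
// machine implied by the interface above; it never reads any messages.
package org.opensearch.indices.pollingingest;

import org.opensearch.index.IngestionShardPointer;

public class NoopStreamPoller implements StreamPoller {
    private volatile State state = State.NONE;
    private volatile boolean closed;
    private final IngestionShardPointer batchStart;

    public NoopStreamPoller(IngestionShardPointer batchStart) {
        this.batchStart = batchStart;
    }

    @Override
    public void start() {
        state = State.POLLING; // a real poller would begin consuming here
    }

    @Override
    public void pause() {
        state = State.PAUSED;
    }

    @Override
    public void resume() {
        state = State.POLLING;
    }

    @Override
    public boolean isPaused() {
        return state == State.PAUSED;
    }

    @Override
    public boolean isClosed() {
        return closed;
    }

    @Override
    public IngestionShardPointer getBatchStartPointer() {
        return batchStart; // the engine records this under BATCH_START in its commit data
    }

    @Override
    public void close() {
        closed = true;
        state = State.CLOSED;
    }
}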
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index c2c4e68dae257..622b8f7bfc565 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -150,6 +150,7 @@ import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.IndexingPressureService; +import org.opensearch.index.IngestionConsumerFactory; import org.opensearch.index.SegmentReplicationStatsTracker; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.compositeindex.CompositeIndexSettings; @@ -207,6 +208,7 @@ import org.opensearch.plugins.IdentityPlugin; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.IngestPlugin; +import org.opensearch.plugins.IngestionConsumerPlugin; import org.opensearch.plugins.MapperPlugin; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.plugins.NetworkPlugin; @@ -859,6 +861,11 @@ protected Node( .map(plugin -> (Function<IndexSettings, Optional<EngineFactory>>) plugin::getEngineFactory) .collect(Collectors.toList()); + // collect ingestion consumer factory providers from plugins + final Map<String, IngestionConsumerFactory> ingestionConsumerFactories = new HashMap<>(); + pluginsService.filterPlugins(IngestionConsumerPlugin.class) + .forEach(plugin -> ingestionConsumerFactories.putAll(plugin.getIngestionConsumerFactories())); + final Map<String, IndexStorePlugin.DirectoryFactory> builtInDirectoryFactories = IndexModule.createBuiltInDirectoryFactories( repositoriesServiceReference::get, threadPool, @@ -944,6 +951,7 @@ protected Node( repositoriesServiceReference::get, searchRequestStats, remoteStoreStatsTrackerFactory, + ingestionConsumerFactories, recoverySettings, cacheService, remoteStoreSettings, diff --git a/server/src/main/java/org/opensearch/plugins/IngestionConsumerPlugin.java b/server/src/main/java/org/opensearch/plugins/IngestionConsumerPlugin.java new file mode 100644 index 0000000000000..e4b9889b5bfa7 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/IngestionConsumerPlugin.java @@ -0,0 +1,36 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.IngestionConsumerFactory; + +import java.util.Map; + +/** + * An extension point for {@link Plugin} implementations to add custom ingestion consumers for the {@link org.opensearch.index.engine.IngestionEngine} + * + * @opensearch.api + */ +@ExperimentalApi +public interface IngestionConsumerPlugin { + + /** + * When an ingestion index is created, this method is invoked for each ingestion consumer plugin. + * Ingestion consumer plugins can inspect the index settings to determine which ingestion consumer to provide. + * + * @return a map from the ingestion consumer type to the factory + */ + Map<String, IngestionConsumerFactory> getIngestionConsumerFactories(); + + /** + * @return the type of the ingestion consumer plugin.
the type name shall be in upper case + */ + String getType(); +} diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index e1a3b4618035e..78f302e9b23db 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -167,4 +167,5 @@ grant { permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; permission org.opensearch.secure_sm.ThreadContextPermission "stashAndMergeHeaders"; permission org.opensearch.secure_sm.ThreadContextPermission "stashWithOrigin"; + permission java.lang.RuntimePermission "setDefaultUncaughtExceptionHandler"; }; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java new file mode 100644 index 0000000000000..f67d13e54e608 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/metadata/IngestionSourceTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.metadata; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.HashMap; +import java.util.Map; + +public class IngestionSourceTests extends OpenSearchTestCase { + + public void testConstructorAndGetters() { + Map params = new HashMap<>(); + params.put("key", "value"); + IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + + assertEquals("type", source.getType()); + assertEquals("pointerInitReset", source.getPointerInitReset()); + assertEquals(params, source.params()); + } + + public void testEquals() { + Map params1 = new HashMap<>(); + params1.put("key", "value"); + IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + + Map params2 = new HashMap<>(); + params2.put("key", "value"); + IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + + assertTrue(source1.equals(source2)); + assertTrue(source2.equals(source1)); + + IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + assertFalse(source1.equals(source3)); + } + + public void testHashCode() { + Map params1 = new HashMap<>(); + params1.put("key", "value"); + IngestionSource source1 = new IngestionSource("type", "pointerInitReset", params1); + + Map params2 = new HashMap<>(); + params2.put("key", "value"); + IngestionSource source2 = new IngestionSource("type", "pointerInitReset", params2); + + assertEquals(source1.hashCode(), source2.hashCode()); + + IngestionSource source3 = new IngestionSource("differentType", "pointerInitReset", params1); + assertNotEquals(source1.hashCode(), source3.hashCode()); + } + + public void testToString() { + Map params = new HashMap<>(); + params.put("key", "value"); + IngestionSource source = new IngestionSource("type", "pointerInitReset", params); + + String expected = "IngestionSource{type='type',pointer_init_reset='pointerInitReset', params={key=value}}"; + assertEquals(expected, source.toString()); + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index bf9a86cff8b76..f8ed68eb2e0a3 
100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -70,6 +70,7 @@ public void testCreateEngineConfigFromFactory() { false, () -> Boolean.TRUE, new InternalTranslogFactory(), + null, null ); @@ -150,6 +151,7 @@ public void testCreateCodecServiceFromFactory() { false, () -> Boolean.TRUE, new InternalTranslogFactory(), + null, null ); assertNotNull(config.getCodec()); diff --git a/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java new file mode 100644 index 0000000000000..de03dcd313c29 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/FakeIngestionSource.java @@ -0,0 +1,183 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.search.Query; +import org.opensearch.index.IngestionConsumerFactory; +import org.opensearch.index.IngestionShardConsumer; +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.Message; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; + +/** + * A fake ingestion source for testing purposes. + */ +public class FakeIngestionSource { + + public static class FakeIngestionConsumerFactory implements IngestionConsumerFactory { + private List messages; + + public FakeIngestionConsumerFactory(List messages) { + this.messages = messages; + } + + @Override + public void initialize(Map params) {} + + @Override + public FakeIngestionConsumer createShardConsumer(String clientId, int shardId) { + return new FakeIngestionConsumer(messages, shardId); + } + + @Override + public FakeIngestionShardPointer parsePointerFromString(String pointer) { + return new FakeIngestionShardPointer(Long.valueOf(pointer)); + } + } + + public static class FakeIngestionConsumer implements IngestionShardConsumer { + // FakeIngestionConsumer uses a list of byte arrays to simulate streams + private List messages; + private int shardId; + private long lastFetchedOffset; + + public FakeIngestionConsumer(List messages, int shardId) { + this.messages = messages; + this.shardId = shardId; + this.lastFetchedOffset = -1; + } + + @Override + public List> readNext( + FakeIngestionShardPointer pointer, + long maxMessages, + int timeoutMillis + ) throws TimeoutException { + lastFetchedOffset = pointer.offset - 1; + int numToFetch = Math.min(messages.size() - (int) pointer.offset, (int) maxMessages); + List> result = new ArrayList<>(); + for (long i = pointer.offset; i < pointer.offset + numToFetch; i++) { + result.add(new ReadResult<>(new FakeIngestionShardPointer(i), new FakeIngestionMessage(messages.get((int) i)))); + lastFetchedOffset = i; + } + return result; + } + + @Override + public FakeIngestionShardPointer nextPointer() { + return new FakeIngestionShardPointer(lastFetchedOffset + 1); + } + + @Override + public FakeIngestionShardPointer earliestPointer() { + return new FakeIngestionShardPointer(0); + } + + @Override + public FakeIngestionShardPointer 
latestPointer() { + return new FakeIngestionShardPointer(messages.size()); + } + + @Override + public int getShardId() { + return shardId; + } + + @Override + public void close() throws IOException { + + } + } + + public static class FakeIngestionMessage implements Message { + private final byte[] payload; + + public FakeIngestionMessage(byte[] payload) { + this.payload = payload; + } + + @Override + public byte[] getPayload() { + return payload; + } + + @Override + public String toString() { + return new String(payload, StandardCharsets.UTF_8); + } + } + + public static class FakeIngestionShardPointer implements IngestionShardPointer { + private final long offset; + + public FakeIngestionShardPointer(long offset) { + this.offset = offset; + } + + @Override + public byte[] serialize() { + ByteBuffer buffer = ByteBuffer.allocate(Long.BYTES); + buffer.putLong(offset); + return buffer.array(); + } + + @Override + public String asString() { + return String.valueOf(offset); + } + + @Override + public String toString() { + return asString(); + } + + @Override + public Field asPointField(String fieldName) { + return new LongPoint(fieldName, offset); + } + + @Override + public Query newRangeQueryGreaterThan(String fieldName) { + return LongPoint.newRangeQuery(fieldName, offset, Long.MAX_VALUE); + } + + @Override + public int compareTo(IngestionShardPointer o) { + FakeIngestionShardPointer other = (FakeIngestionShardPointer) o; + return Long.compare(offset, other.offset); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + FakeIngestionShardPointer that = (FakeIngestionShardPointer) o; + return offset == that.offset; + } + + @Override + public int hashCode() { + return Long.hashCode(offset); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java new file mode 100644 index 0000000000000..19718384bd926 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/IngestionEngineTests.java @@ -0,0 +1,189 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.NoMergePolicy; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.mapper.DocumentMapperForType; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.store.Store; +import org.opensearch.index.translog.Translog; +import org.opensearch.indices.pollingingest.StreamPoller; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static org.awaitility.Awaitility.await; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; + +public class IngestionEngineTests extends EngineTestCase { + + private IndexSettings indexSettings; + private Store ingestionEngineStore; + private IngestionEngine ingestionEngine; + // the messages of the stream to ingest from + private List messages; + + @Override + @Before + public void setUp() throws Exception { + indexSettings = newIndexSettings(); + super.setUp(); + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + ingestionEngineStore = createStore(indexSettings, newDirectory()); + // create some initial messages + messages = new ArrayList<>(); + publishData("{\"_id\":\"2\",\"_source\":{\"name\":\"bob\", \"age\": 24}}"); + publishData("{\"_id\":\"1\",\"_source\":{\"name\":\"alice\", \"age\": 20}}"); + ingestionEngine = buildIngestionEngine(globalCheckpoint, ingestionEngineStore, indexSettings); + } + + private void publishData(String message) { + messages.add(message.getBytes(StandardCharsets.UTF_8)); + } + + protected IndexSettings newIndexSettings() { + return IndexSettingsModule.newIndexSettings( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_INGESTION_SOURCE_TYPE, "fake") + .put(IndexMetadata.SETTING_INGESTION_SOURCE_POINTER_INIT_RESET, "earliest") + .build() + ); + } + + @Override + @After + public void tearDown() throws Exception { + if (ingestionEngine != null) { + ingestionEngine.close(); + } + if (ingestionEngineStore != null) { + ingestionEngineStore.close(); + } + super.tearDown(); + } + + public void testCreateEngine() throws IOException { + // wait for the engine to ingest all messages + waitForResults(ingestionEngine, 2); + // flush + ingestionEngine.flush(false, true); + Map commitData = ingestionEngine.commitDataAsMap(); + // verify the commit data + Assert.assertEquals(1, commitData.size()); + Assert.assertEquals("2", commitData.get(StreamPoller.BATCH_START)); + + // verify the stored offsets + var offset = new FakeIngestionSource.FakeIngestionShardPointer(0); + ingestionEngine.refresh("read_offset"); + try (Engine.Searcher searcher = ingestionEngine.acquireSearcher("read_offset")) { + Set persistedPointers = ingestionEngine.fetchPersistedOffsets( + Lucene.wrapAllDocsLive(searcher.getDirectoryReader()), + offset + ); + 
Assert.assertEquals(2, persistedPointers.size()); + } + } + + public void testRecovery() throws IOException { + // wait for the engine to ingest all messages + waitForResults(ingestionEngine, 2); + // flush + ingestionEngine.flush(false, true); + + // ingest some new messages + publishData("{\"_id\":\"3\",\"_source\":{\"name\":\"john\", \"age\": 30}}"); + publishData("{\"_id\":\"4\",\"_source\":{\"name\":\"jane\", \"age\": 25}}"); + ingestionEngine.close(); + ingestionEngine = buildIngestionEngine(new AtomicLong(2), ingestionEngineStore, indexSettings); + waitForResults(ingestionEngine, 4); + } + + public void testCreationFailure() throws IOException { + // Simulate an error scenario + Store mockStore = mock(Store.class); + doThrow(new IOException("Simulated IOException")).when(mockStore).readLastCommittedSegmentsInfo(); + + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); + EngineConfig engineConfig = config( + indexSettings, + store, + createTempDir(), + NoMergePolicy.INSTANCE, + null, + null, + globalCheckpoint::get + ); + // overwrite the config with ingestion engine settings + String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}"; + MapperService mapperService = createMapperService(mapping); + engineConfig = config(engineConfig, () -> new DocumentMapperForType(mapperService.documentMapper(), null)); + try { + new IngestionEngine(engineConfig, consumerFactory); + fail("Expected EngineException to be thrown"); + } catch (EngineException e) { + assertEquals("failed to create engine", e.getMessage()); + assertTrue(e.getCause() instanceof IOException); + } + } + + private IngestionEngine buildIngestionEngine(AtomicLong globalCheckpoint, Store store, IndexSettings settings) throws IOException { + FakeIngestionSource.FakeIngestionConsumerFactory consumerFactory = new FakeIngestionSource.FakeIngestionConsumerFactory(messages); + EngineConfig engineConfig = config(settings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + // overwrite the config with ingestion engine settings + String mapping = "{\"properties\":{\"name\":{\"type\": \"text\"},\"age\":{\"type\": \"integer\"}}}"; + MapperService mapperService = createMapperService(mapping); + engineConfig = config(engineConfig, () -> new DocumentMapperForType(mapperService.documentMapper(), null)); + if (!Lucene.indexExists(store.directory())) { + store.createEmpty(engineConfig.getIndexSettings().getIndexVersionCreated().luceneVersion); + final String translogUuid = Translog.createEmptyTranslog( + engineConfig.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, + shardId, + primaryTerm.get() + ); + store.associateIndexWithNewTranslog(translogUuid); + } + IngestionEngine ingestionEngine = new IngestionEngine(engineConfig, consumerFactory); + ingestionEngine.start(); + return ingestionEngine; + } + + private void waitForResults(Engine engine, int numDocs) { + await().atMost(3, TimeUnit.SECONDS).untilAsserted(() -> { Assert.assertTrue(resultsFound(engine, numDocs)); }); + } + + private boolean resultsFound(Engine engine, int numDocs) { + engine.refresh("index"); + try (Engine.Searcher searcher = engine.acquireSearcher("index")) { + return searcher.getIndexReader().numDocs() == numDocs; + } + } +} diff --git
a/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java new file mode 100644 index 0000000000000..1a98f65d04f7c --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/pollingingest/DefaultStreamPollerTests.java @@ -0,0 +1,167 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.index.IngestionShardPointer; +import org.opensearch.index.engine.FakeIngestionSource; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class DefaultStreamPollerTests extends OpenSearchTestCase { + private DefaultStreamPoller poller; + private FakeIngestionSource.FakeIngestionConsumer fakeConsumer; + private MessageProcessorRunnable processorRunnable; + private MessageProcessorRunnable.MessageProcessor processor; + private List<byte[]> messages; + private Set<IngestionShardPointer> persistedPointers; + private final int sleepTime = 300; + + @Before + public void setUp() throws Exception { + super.setUp(); + messages = new ArrayList<>(); + messages.add("{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"_id\":\"2\",\"_source\":{\"name\":\"alice\", \"age\": 21}}".getBytes(StandardCharsets.UTF_8)); + fakeConsumer = new FakeIngestionSource.FakeIngestionConsumer(messages, 0); + processor = mock(MessageProcessorRunnable.MessageProcessor.class); + processorRunnable = new MessageProcessorRunnable(new ArrayBlockingQueue<>(5), processor); + persistedPointers = new HashSet<>(); + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.NONE + ); + } + + @After + public void tearDown() throws Exception { + if (!poller.isClosed()) { + poller.close(); + } + super.tearDown(); + } + + public void testPauseAndResume() throws InterruptedException { + poller.pause(); + poller.start(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + assertEquals(DefaultStreamPoller.State.PAUSED, poller.getState()); + assertTrue(poller.isPaused()); + // no messages are processed + verify(processor, never()).process(any(), any()); + + poller.resume(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + assertFalse(poller.isPaused()); + // 2 messages are processed + verify(processor, times(2)).process(any(), any()); + } + + public void testSkipProcessed() throws InterruptedException { + messages.add("{\"name\":\"cathy\", \"age\": 21}".getBytes(StandardCharsets.UTF_8)); + messages.add("{\"name\":\"danny\", \"age\": 31}".getBytes(StandardCharsets.UTF_8)); + persistedPointers.add(new FakeIngestionSource.FakeIngestionShardPointer(1)); + persistedPointers.add(new FakeIngestionSource.FakeIngestionShardPointer(2)); + poller = new
DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.NONE + ); + poller.start(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + // 2 messages are processed, 2 messages are skipped + verify(processor, times(2)).process(any(), any()); + assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getMaxPersistedPointer()); + } + + public void testCloseWithoutStart() { + poller.close(); + assertTrue(poller.isClosed()); + } + + public void testClose() throws InterruptedException { + poller.start(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + poller.close(); + assertTrue(poller.isClosed()); + assertEquals(DefaultStreamPoller.State.CLOSED, poller.getState()); + } + + public void testResetStateEarliest() throws InterruptedException { + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(1), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.EARLIEST + ); + + poller.start(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + + // 2 messages are processed + verify(processor, times(2)).process(any(), any()); + } + + public void testResetStateLatest() throws InterruptedException { + poller = new DefaultStreamPoller( + new FakeIngestionSource.FakeIngestionShardPointer(0), + persistedPointers, + fakeConsumer, + processorRunnable, + StreamPoller.ResetState.LATEST + ); + + poller.start(); + Thread.sleep(sleepTime); // Allow some time for the poller to run + // no messages processed + verify(processor, never()).process(any(), any()); + // reset to the latest + assertEquals(new FakeIngestionSource.FakeIngestionShardPointer(2), poller.getBatchStartPointer()); + } + + public void testStartPollWithoutStart() { + try { + poller.startPoll(); + fail("Expected an exception to be thrown"); + } catch (IllegalStateException e) { + assertEquals("poller is not started!", e.getMessage()); + } + } + + public void testStartClosedPoller() throws InterruptedException { + poller.start(); + Thread.sleep(sleepTime); + poller.close(); + try { + poller.startPoll(); + fail("Expected an exception to be thrown"); + } catch (IllegalStateException e) { + assertEquals("poller is closed!", e.getMessage()); + } + } +} diff --git a/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java b/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java new file mode 100644 index 0000000000000..273e25c0a5bfc --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/pollingingest/MessageProcessorTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.pollingingest; + +import org.opensearch.index.engine.Engine; +import org.opensearch.index.engine.FakeIngestionSource; +import org.opensearch.index.engine.IngestionEngine; +import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.DocumentMapperForType; +import org.opensearch.index.mapper.ParseContext; +import org.opensearch.index.mapper.ParsedDocument; +import org.opensearch.index.mapper.SourceToParse; +import org.opensearch.test.OpenSearchTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import org.mockito.ArgumentCaptor; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class MessageProcessorTests extends OpenSearchTestCase { + private IngestionEngine ingestionEngine; + private DocumentMapper documentMapper; + private DocumentMapperForType documentMapperForType; + private MessageProcessorRunnable.MessageProcessor processor; + + @Before + public void setUp() throws Exception { + super.setUp(); + ingestionEngine = mock(IngestionEngine.class); + documentMapperForType = mock(DocumentMapperForType.class); + when(ingestionEngine.getDocumentMapperForType()).thenReturn(documentMapperForType); + + documentMapper = mock(DocumentMapper.class); + when(documentMapperForType.getDocumentMapper()).thenReturn(documentMapper); + processor = new MessageProcessorRunnable.MessageProcessor(ingestionEngine, "index"); + } + + public void testGetIndexOperation() throws IOException { + byte[] payload = "{\"_id\":\"1\",\"_source\":{\"name\":\"bob\", \"age\": 24}}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + ParsedDocument parsedDocument = mock(ParsedDocument.class); + when(documentMapper.parse(any())).thenReturn(parsedDocument); + when(parsedDocument.rootDoc()).thenReturn(new ParseContext.Document()); + + Engine.Operation operation = processor.getOperation(payload, pointer); + + assertTrue(operation instanceof Engine.Index); + ArgumentCaptor captor = ArgumentCaptor.forClass(SourceToParse.class); + verify(documentMapper).parse(captor.capture()); + assertEquals("index", captor.getValue().index()); + assertEquals("1", captor.getValue().id()); + } + + public void testGetDeleteOperation() throws IOException { + byte[] payload = "{\"_id\":\"1\",\"_op_type\":\"delete\"}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + Engine.Operation operation = processor.getOperation(payload, pointer); + + assertTrue(operation instanceof Engine.Delete); + Engine.Delete deleteOperation = (Engine.Delete) operation; + assertEquals("1", deleteOperation.id()); + } + + public void testSkipNoSourceIndexOperation() throws IOException { + byte[] payload = "{\"_id\":\"1\"}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + Engine.Operation operation = processor.getOperation(payload, pointer); + assertNull(operation); + + // source has wrong type + payload = "{\"_id\":\"1\", \"_source\":1}".getBytes(StandardCharsets.UTF_8); + + operation = processor.getOperation(payload, pointer); + assertNull(operation); + } + + public void testUnsupportedOperation() throws IOException { + byte[] 
payload = "{\"_id\":\"1\", \"_op_tpe\":\"update\"}".getBytes(StandardCharsets.UTF_8); + FakeIngestionSource.FakeIngestionShardPointer pointer = new FakeIngestionSource.FakeIngestionShardPointer(0); + + Engine.Operation operation = processor.getOperation(payload, pointer); + assertNull(operation); + } +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 493a3ed431e00..a50c1d704362e 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -2102,6 +2102,7 @@ public void onFailure(final Exception e) { repositoriesServiceReference::get, null, new RemoteStoreStatsTrackerFactory(clusterService, settings), + emptyMap(), DefaultRecoverySettings.INSTANCE, new CacheModule(new ArrayList<>(), settings).getCacheService(), DefaultRemoteStoreSettings.INSTANCE diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 3403425d89254..d6cd5cfb81dc4 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -96,6 +96,7 @@ import org.opensearch.index.codec.CodecService; import org.opensearch.index.fieldvisitor.IdOnlyFieldVisitor; import org.opensearch.index.mapper.DocumentMapper; +import org.opensearch.index.mapper.DocumentMapperForType; import org.opensearch.index.mapper.IdFieldMapper; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.Mapping; @@ -987,6 +988,40 @@ protected EngineConfig config( .build(); } + /** + * Override config with ingestion engine configs + */ + protected EngineConfig config(EngineConfig config, Supplier documentMapperForTypeSupplier) { + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "test", + Settings.builder().put(config.getIndexSettings().getSettings()).build() + ); + return new EngineConfig.Builder().shardId(config.getShardId()) + .threadPool(config.getThreadPool()) + .indexSettings(indexSettings) + .warmer(config.getWarmer()) + .store(config.getStore()) + .mergePolicy(config.getMergePolicy()) + .analyzer(config.getAnalyzer()) + .similarity(config.getSimilarity()) + .codecService(new CodecService(null, indexSettings, logger)) + .eventListener(config.getEventListener()) + .queryCache(config.getQueryCache()) + .queryCachingPolicy(config.getQueryCachingPolicy()) + .translogConfig(config.getTranslogConfig()) + .flushMergesAfter(config.getFlushMergesAfter()) + .externalRefreshListener(config.getExternalRefreshListener()) + .internalRefreshListener(config.getInternalRefreshListener()) + .indexSort(config.getIndexSort()) + .circuitBreakerService(config.getCircuitBreakerService()) + .globalCheckpointSupplier(config.getGlobalCheckpointSupplier()) + .retentionLeasesSupplier(config.retentionLeasesSupplier()) + .primaryTermSupplier(config.getPrimaryTermSupplier()) + .tombstoneDocSupplier(config.getTombstoneDocSupplier()) + .documentMapperForTypeSupplier(documentMapperForTypeSupplier) + .build(); + } + protected EngineConfig noOpConfig(IndexSettings indexSettings, Store store, Path translogPath) { return noOpConfig(indexSettings, store, translogPath, null); } @@ -1541,6 +1576,10 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings } public static 
MapperService createMapperService() throws IOException { + return createMapperService("{\"properties\": {}}"); + } + + public static MapperService createMapperService(String mapping) throws IOException { IndexMetadata indexMetadata = IndexMetadata.builder("test") .settings( Settings.builder() @@ -1548,7 +1587,7 @@ public static MapperService createMapperService() throws IOException { .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) ) - .putMapping("{\"properties\": {}}") + .putMapping(mapping) .build(); MapperService mapperService = MapperTestUtils.newMapperService( new NamedXContentRegistry(ClusterModule.getNamedXWriteables()), diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index e1eafb8570022..318549f676edf 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -765,6 +765,13 @@ public final void createIndex(String name, Settings indexSettings) { assertAcked(prepareCreate(name).setSettings(indexSettings)); } + /** + * creates an index with the given setting and mapping + */ + public final void createIndex(String name, Settings indexSettings, String mapping) { + assertAcked(prepareCreate(name).setSettings(indexSettings).setMapping(mapping)); + } + /** * creates an index with the given setting */ From b9900ee5c36dd73925babfa4d29c8d085f48b962 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 29 Jan 2025 14:37:25 -0500 Subject: [PATCH 32/48] Fix java.security.AccessControlException during OpenSearch server shutdown cycle (#17183) Signed-off-by: Andriy Redko --- .../transport/SharedGroupFactory.java | 6 +-- .../plugin-metadata/plugin-security.policy | 5 +- .../transport/reactor/SharedGroupFactory.java | 6 +-- .../plugin-metadata/plugin-security.policy | 5 +- .../util/concurrent/OpenSearchExecutors.java | 51 +++++++++++++++++++ .../org/opensearch/bootstrap/security.policy | 1 + 6 files changed, 62 insertions(+), 12 deletions(-) diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java index 454293442572c..00c7f666d2b35 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/SharedGroupFactory.java @@ -47,7 +47,7 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.util.concurrent.Future; -import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.privilegedDaemonThreadFactory; /** * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. 
It will return a shared group for @@ -91,7 +91,7 @@ public synchronized SharedGroup getHttpGroup() { if (dedicatedHttpGroup == null) { NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( httpWorkerCount, - daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + privilegedDaemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) ); dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); } @@ -103,7 +103,7 @@ private SharedGroup getGenericGroup() { if (genericGroup == null) { EventLoopGroup eventLoopGroup = new NioEventLoopGroup( workerCount, - daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) + privilegedDaemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) ); this.genericGroup = new RefCountedGroup(eventLoopGroup); } else { diff --git a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy index c8eee6bb577d0..62cac9cda2a3e 100644 --- a/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy +++ b/modules/transport-netty4/src/main/plugin-metadata/plugin-security.policy @@ -30,7 +30,7 @@ * GitHub history for details. */ -grant codeBase "${codebase.netty-common}" { +grant { // for reading the system-wide configuration for the backlog of established sockets permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; @@ -39,9 +39,8 @@ grant codeBase "${codebase.netty-common}" { // Netty sets custom classloader for some of its internal threads permission java.lang.RuntimePermission "*", "setContextClassLoader"; -}; + permission java.lang.RuntimePermission "getClassLoader"; -grant codeBase "${codebase.netty-transport}" { // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java index ab7de33c8e673..7df888fefce32 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/SharedGroupFactory.java @@ -29,7 +29,7 @@ import io.netty.channel.nio.NioEventLoopGroup; import io.netty.util.concurrent.Future; -import static org.opensearch.common.util.concurrent.OpenSearchExecutors.daemonThreadFactory; +import static org.opensearch.common.util.concurrent.OpenSearchExecutors.privilegedDaemonThreadFactory; /** * Creates and returns {@link io.netty.channel.EventLoopGroup} instances. 
It will return a shared group for @@ -89,7 +89,7 @@ public synchronized SharedGroup getHttpGroup() { if (dedicatedHttpGroup == null) { NioEventLoopGroup eventLoopGroup = new NioEventLoopGroup( httpWorkerCount, - daemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) + privilegedDaemonThreadFactory(settings, HttpServerTransport.HTTP_SERVER_WORKER_THREAD_NAME_PREFIX) ); dedicatedHttpGroup = new SharedGroup(new RefCountedGroup(eventLoopGroup)); } @@ -101,7 +101,7 @@ private SharedGroup getGenericGroup() { if (genericGroup == null) { EventLoopGroup eventLoopGroup = new NioEventLoopGroup( workerCount, - daemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) + privilegedDaemonThreadFactory(settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX) ); this.genericGroup = new RefCountedGroup(eventLoopGroup); } else { diff --git a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy index 4f2dcde995338..2b589d7518988 100644 --- a/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/transport-reactor-netty4/src/main/plugin-metadata/plugin-security.policy @@ -6,7 +6,7 @@ * compatible open source license. */ -grant codeBase "${codebase.netty-common}" { +grant { // for reading the system-wide configuration for the backlog of established sockets permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read"; @@ -15,9 +15,8 @@ grant codeBase "${codebase.netty-common}" { // Netty sets custom classloader for some of its internal threads permission java.lang.RuntimePermission "*", "setContextClassLoader"; -}; + permission java.lang.RuntimePermission "getClassLoader"; -grant codeBase "${codebase.netty-transport}" { // Netty NioEventLoop wants to change this, because of https://bugs.openjdk.java.net/browse/JDK-6427854 // the bug says it only happened rarely, and that its fixed, but apparently it still happens rarely! 
permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write"; diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java index 6e45c3fb7b58d..df2badee08060 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/OpenSearchExecutors.java @@ -43,6 +43,8 @@ import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TaskAwareRunnable; +import java.security.AccessController; +import java.security.PrivilegedAction; import java.util.List; import java.util.Optional; import java.util.concurrent.AbstractExecutorService; @@ -382,6 +384,19 @@ public static ThreadFactory daemonThreadFactory(String namePrefix) { return new OpenSearchThreadFactory(namePrefix); } + public static ThreadFactory privilegedDaemonThreadFactory(Settings settings, String namePrefix) { + return privilegedDaemonThreadFactory(threadName(settings, namePrefix)); + } + + public static ThreadFactory privilegedDaemonThreadFactory(String nodeName, String namePrefix) { + assert nodeName != null && false == nodeName.isEmpty(); + return privilegedDaemonThreadFactory(threadName(nodeName, namePrefix)); + } + + public static ThreadFactory privilegedDaemonThreadFactory(String namePrefix) { + return new PrivilegedOpenSearchThreadFactory(namePrefix); + } + /** * A thread factory * * @opensearch.internal */ @@ -409,6 +424,42 @@ public Thread newThread(Runnable r) { } + /** + * A thread factory whose threads run each task within a privileged context + * + * @opensearch.internal + */ + static class PrivilegedOpenSearchThreadFactory implements ThreadFactory { + + final ThreadGroup group; + final AtomicInteger threadNumber = new AtomicInteger(1); + final String namePrefix; + + @SuppressWarnings("removal") + PrivilegedOpenSearchThreadFactory(String namePrefix) { + this.namePrefix = namePrefix; + SecurityManager s = System.getSecurityManager(); + group = (s != null) ? s.getThreadGroup() : Thread.currentThread().getThreadGroup(); + } + + @Override + public Thread newThread(Runnable r) { + final Thread t = new Thread(group, new Runnable() { + @SuppressWarnings({ "deprecation", "removal" }) + @Override + public void run() { + AccessController.doPrivileged((PrivilegedAction<Void>) () -> { + r.run(); + return null; + }); + } + }, namePrefix + "[T#" + threadNumber.getAndIncrement() + "]", 0); + t.setDaemon(true); + return t; + } + + } + /** * Cannot instantiate.
*/ diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 22e445f7d9022..a6d6014b26bfb 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -46,6 +46,7 @@ grant codeBase "${codebase.opensearch-secure-sm}" { grant codeBase "${codebase.opensearch}" { // needed for loading plugins which may expect the context class loader to be set permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "getClassLoader"; // needed for SPI class loading permission java.lang.RuntimePermission "accessDeclaredMembers"; permission org.opensearch.secure_sm.ThreadContextPermission "markAsSystemContext"; From 6e81d231481972ec64d3fddc8794d8b3ac75cc71 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Wed, 29 Jan 2025 12:51:03 -0800 Subject: [PATCH 33/48] Remove deprecated 'reindex.remote.whitelist` setting (#17188) This has been replaced by `reindex.remote.allowlist`. Signed-off-by: Andrew Ross --- .../index/reindex/ReindexModulePlugin.java | 1 - .../index/reindex/TransportReindexAction.java | 11 +-- .../reindex/ReindexRenamedSettingTests.java | 85 ------------------- 3 files changed, 1 insertion(+), 96 deletions(-) delete mode 100644 modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java index aa48da4cb2421..783b011b99249 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexModulePlugin.java @@ -130,7 +130,6 @@ public Collection createComponents( @Override public List> getSettings() { final List> settings = new ArrayList<>(); - settings.add(TransportReindexAction.REMOTE_CLUSTER_WHITELIST); settings.add(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST); settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_INITIAL_BACKOFF); settings.add(TransportReindexAction.REMOTE_REINDEX_RETRY_MAX_COUNT); diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java index c9a970a4118b3..87ef85a5ca2a0 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/TransportReindexAction.java @@ -57,18 +57,9 @@ import static java.util.Collections.emptyList; public class TransportReindexAction extends HandledTransportAction { - static final Setting> REMOTE_CLUSTER_WHITELIST = Setting.listSetting( - "reindex.remote.whitelist", - emptyList(), - Function.identity(), - Property.NodeScope, - Property.Deprecated - ); - // The setting below is going to replace the above. - // To keep backwards compatibility, the old usage is remained, and it's also used as the fallback for the new usage. 
public static final Setting> REMOTE_CLUSTER_ALLOWLIST = Setting.listSetting( "reindex.remote.allowlist", - REMOTE_CLUSTER_WHITELIST, + emptyList(), Function.identity(), Property.NodeScope ); diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java deleted file mode 100644 index be4bacce9b57c..0000000000000 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexRenamedSettingTests.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.index.reindex; - -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.test.OpenSearchTestCase; - -import java.util.Arrays; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItems; - -/** - * A unit test to validate the former name of the setting 'reindex.remote.allowlist' still take effect, - * after it is deprecated, so that the backwards compatibility is maintained. - * The test can be removed along with removing support of the deprecated setting. - */ -public class ReindexRenamedSettingTests extends OpenSearchTestCase { - private final ReindexModulePlugin plugin = new ReindexModulePlugin(); - - /** - * Validate the both settings are known and supported. - */ - public void testReindexSettingsExist() { - List> settings = plugin.getSettings(); - assertThat( - "Both 'reindex.remote.allowlist' and its predecessor should be supported settings of Reindex plugin", - settings, - hasItems(TransportReindexAction.REMOTE_CLUSTER_WHITELIST, TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST) - ); - } - - /** - * Validate the default value of the both settings is the same. - */ - public void testSettingFallback() { - assertThat( - TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.get(Settings.EMPTY), - equalTo(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.get(Settings.EMPTY)) - ); - } - - /** - * Validate the new setting can be configured correctly, and it doesn't impact the old setting. - */ - public void testSettingGetValue() { - Settings settings = Settings.builder().put("reindex.remote.allowlist", "127.0.0.1:*").build(); - assertThat(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.get(settings), equalTo(Arrays.asList("127.0.0.1:*"))); - assertThat( - TransportReindexAction.REMOTE_CLUSTER_WHITELIST.get(settings), - equalTo(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getDefault(Settings.EMPTY)) - ); - } - - /** - * Validate the value of the old setting will be applied to the new setting, if the new setting is not configured. - */ - public void testSettingGetValueWithFallback() { - Settings settings = Settings.builder().put("reindex.remote.whitelist", "127.0.0.1:*").build(); - assertThat(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.get(settings), equalTo(Arrays.asList("127.0.0.1:*"))); - assertSettingDeprecationsAndWarnings(new Setting[] { TransportReindexAction.REMOTE_CLUSTER_WHITELIST }); - } - - /** - * Validate the value of the old setting will be ignored, if the new setting is configured. 
- */ - public void testSettingGetValueWhenBothAreConfigured() { - Settings settings = Settings.builder() - .put("reindex.remote.allowlist", "127.0.0.1:*") - .put("reindex.remote.whitelist", "[::1]:*, 127.0.0.1:*") - .build(); - assertThat(TransportReindexAction.REMOTE_CLUSTER_ALLOWLIST.get(settings), equalTo(Arrays.asList("127.0.0.1:*"))); - assertThat(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.get(settings), equalTo(Arrays.asList("[::1]:*", "127.0.0.1:*"))); - assertSettingDeprecationsAndWarnings(new Setting[] { TransportReindexAction.REMOTE_CLUSTER_WHITELIST }); - } - -} From fd37ac8fa84d4a76536409662cf3d840ba85af4f Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Thu, 30 Jan 2025 02:47:44 +0530 Subject: [PATCH 34/48] Correcting the changelog description (#17192) Signed-off-by: Ankit Jain --- CHANGELOG-3.0.md | 1 + CHANGELOG.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index bd7fd5fb621ce..b02b0339603b0 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -11,6 +11,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Implement on behalf of token passing for extensions ([#8679](https://github.com/opensearch-project/OpenSearch/pull/8679), [#10664](https://github.com/opensearch-project/OpenSearch/pull/10664)) - Provide service accounts tokens to extensions ([#9618](https://github.com/opensearch-project/OpenSearch/pull/9618)) - GHA to verify checklist items completion in PR descriptions ([#10800](https://github.com/opensearch-project/OpenSearch/pull/10800)) +- [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) - Allow to pass the list settings through environment variables (like [], ["a", "b", "c"], ...) 
([#10625](https://github.com/opensearch-project/OpenSearch/pull/10625)) - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) - Added pull-based Ingestion (APIs, for ingestion source, a Kafka plugin, and IngestionEngine that pulls data from the ingestion source) ([#16958](https://github.com/opensearch-project/OpenSearch/pull/16958)) diff --git a/CHANGELOG.md b/CHANGELOG.md index f386b092cf074..242a10d4ae6da 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -116,7 +116,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) - Stop processing search requests when _msearch request is cancelled ([#17005](https://github.com/opensearch-project/OpenSearch/pull/17005)) - Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) -- [WLM] Add WLM support for search scroll API ([#16981](https://github.com/opensearch-project/OpenSearch/pull/16981)) +- [WLM] Fix the QueryGroupTask logging bug ([#17169](https://github.com/opensearch-project/OpenSearch/pull/17169)) - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) - Fix AutoDateHistogramAggregator rounding assertion failure ([#17023](https://github.com/opensearch-project/OpenSearch/pull/17023)) From 6f644e1c12de709b2136b999c8cb112a27c62100 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Wed, 29 Jan 2025 14:53:52 -0800 Subject: [PATCH 35/48] Fix flaky TransportMultiSearchActionTests.testCancellation (#17193) I recently added this test, but incorrectly placed a CountDownLatch#await call on the test thread. With this change, we actually kick off the request, return control to the test thread, cancel the request, then continue executing. Signed-off-by: Michael Froh --- .../TransportMultiSearchActionTests.java | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 45980e7137ce4..f969313b71833 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -321,11 +321,8 @@ public TaskManager getTaskManager() { // and if there are more searches than is allowed create an error and remember that. int maxAllowedConcurrentSearches = 1; // Allow 1 search at a time.
AtomicInteger counter = new AtomicInteger();
- AtomicReference<Exception> errorHolder = new AtomicReference<>();
- // randomize whether or not requests are executed asynchronously
 ExecutorService executorService = threadPool.executor(ThreadPool.Names.GENERIC);
- final Set<SearchRequest> requests = Collections.newSetFromMap(Collections.synchronizedMap(new IdentityHashMap<>()));
- CountDownLatch countDownLatch = new CountDownLatch(1);
+ CountDownLatch canceledLatch = new CountDownLatch(1);
 CancellableTask[] parentTask = new CancellableTask[1];
 NodeClient client = new NodeClient(settings, threadPool) {
 @Override
 public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) {
+ try {
+ if (!canceledLatch.await(1, TimeUnit.SECONDS)) {
+ fail("Latch should have counted down");
+ }
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
 counter.decrementAndGet();
 listener.onResponse(
 new SearchResponse(
@@ -399,7 +397,7 @@ public void onFailure(Task task, Exception e) {
 }
 });
 parentTask[0].cancel("Giving up");
- countDownLatch.countDown();
+ canceledLatch.countDown();
 assertNull(responses[0]);
 assertNull(exceptions[0]);

From 679a08f505a5567b7ac88ec08161afb02201848d Mon Sep 17 00:00:00 2001
From: Prudhvi Godithi
Date: Wed, 29 Jan 2025 17:34:28 -0800
Subject: [PATCH 36/48] Refactor for JPMS Support (#17153)

Signed-off-by: Prudhvi Godithi
---
 CHANGELOG-3.0.md | 3 +-
 distribution/build.gradle | 4 +-
 distribution/src/bin/opensearch-keystore | 2 +-
 distribution/src/bin/opensearch-keystore.bat | 2 +-
 distribution/src/bin/opensearch-plugin | 2 +-
 distribution/src/bin/opensearch-plugin.bat | 2 +-
 .../cli/keystore}/AddFileKeyStoreCommand.java | 3 +-
 .../keystore}/AddStringKeyStoreCommand.java | 3 +-
 .../cli/keystore}/BaseKeyStoreCommand.java | 52 ++++++++++++--
 .../ChangeKeyStorePasswordCommand.java | 20 +++++-
 .../cli/keystore}/CreateKeyStoreCommand.java | 4 +-
 .../keystore}/HasPasswordKeyStoreCommand.java | 24 ++++++-
 .../cli/keystore}/KeyStoreAwareCommand.java | 39 +++++++---
 .../cli/keystore}/KeyStoreCli.java | 4 +-
 .../cli/keystore}/ListKeyStoreCommand.java | 3 +-
 .../RemoveSettingKeyStoreCommand.java | 3 +-
 .../cli/keystore}/UpgradeKeyStoreCommand.java | 19 ++++-
 .../cli/keystore}/package-info.java | 2 +-
 .../opensearch/bootstrap/BootstrapTests.java | 2 +-
 .../AddFileKeyStoreCommandTests.java | 3 +-
 .../AddStringKeyStoreCommandTests.java | 3 +-
 .../ChangeKeyStorePasswordCommandTests.java | 2 +-
 .../keystore}/CreateKeyStoreCommandTests.java | 3 +-
 .../HasPasswordKeyStoreCommandTests.java | 2 +-
 .../keystore}/KeyStoreCommandTestCase.java | 4 +-
 .../cli/keystore}/KeyStoreWrapperTests.java | 3 +-
 .../keystore}/ListKeyStoreCommandTests.java | 2 +-
 .../RemoveSettingKeyStoreCommandTests.java | 2 +-
 .../UpgradeKeyStoreCommandTests.java | 3 +-
 .../cli/plugin}/InstallPluginCommand.java | 20 ++----
 .../cli/plugin}/ListPluginsCommand.java | 6 +-
 .../cli/plugin}/PluginCli.java | 4 +-
 .../cli/plugin}/PluginHelper.java | 4 +-
 .../tools/cli/plugin}/PluginSecurity.java | 4 +-
 .../cli/plugin}/ProgressInputStream.java | 2 +-
 .../cli/plugin}/RemovePluginCommand.java | 16 ++---
 .../cli/plugin}/package-info.java | 2 +-
 .../plugin}/InstallPluginCommandTests.java | 5 +-
 .../cli/plugin}/ListPluginsCommandTests.java | 4 +-
 .../cli/plugin}/ProgressInputStreamTests.java | 2 +-
 .../cli/plugin}/RemovePluginCommandTests.java | 3 +-
 .../upgrade/DetectEsInstallationTask.java | 2 +-
 .../cli}/upgrade/ImportJvmOptionsTask.java | 2 +-
 .../cli}/upgrade/ImportKeystoreTask.java | 3 +-
.../upgrade/ImportLog4jPropertiesTask.java | 2 +- .../cli}/upgrade/ImportYmlConfigTask.java | 2 +- .../cli}/upgrade/InstallPluginsTask.java | 2 +- .../cli/upgrade}/KeystoreWrapperUtil.java | 4 +- .../{ => tools/cli}/upgrade/TaskInput.java | 2 +- .../{ => tools/cli}/upgrade/UpgradeCli.java | 4 +- .../{ => tools/cli}/upgrade/UpgradeTask.java | 2 +- .../cli}/upgrade/ValidateInputTask.java | 2 +- .../cli/upgrade}/package-info.java | 2 +- .../org/opensearch/upgrade/package-info.java | 19 ----- .../DetectEsInstallationTaskTests.java | 2 +- .../ImportLog4jPropertiesTaskTests.java | 2 +- .../upgrade/ImportYmlConfigTaskTests.java | 2 +- .../cli}/upgrade/InstallPluginsTaskTests.java | 2 +- .../cli}/upgrade/UpgradeCliTests.java | 2 +- .../cli}/upgrade/ValidateInputTaskTests.java | 2 +- libs/cli/build.gradle | 8 +-- .../main/java/org/opensearch/cli/Command.java | 2 +- .../java/org/opensearch/cli/CommandTests.java | 0 .../org/opensearch/cli/MultiCommandTests.java | 0 .../org/opensearch/cli/TerminalTests.java | 0 .../ExtendedPluginsClassLoader.java | 2 +- qa/evil-tests/build.gradle | 1 + .../cli/EvilEnvironmentAwareCommandTests.java | 1 + .../cli/plugin}/PluginSecurityTests.java | 8 +-- .../plugin}/complex-plugin-security.policy | 0 .../cli/plugin}/simple-plugin-security.policy | 0 .../plugin}/unresolved-plugin-security.policy | 0 .../http/IdentityAuthenticationIT.java | 1 - .../org/opensearch/bootstrap/Bootstrap.java | 4 +- .../org/opensearch/bootstrap/OpenSearch.java | 2 +- .../opensearch/cli/LoggingAwareCommand.java | 53 -------------- .../cluster/coordination/NodeToolCli.java | 2 +- .../coordination/OpenSearchNodeCommand.java | 2 +- .../cli/CommandLoggingConfigurator.java | 2 +- .../cli/EnvironmentAwareCommand.java | 8 ++- .../cli/LoggingAwareMultiCommand.java | 4 +- .../{ => common}/cli/package-info.java | 2 +- .../common/settings/KeyStoreWrapper.java | 8 +-- .../opensearch/index/shard/ShardToolCli.java | 2 +- .../plugins/PluginLoaderIndirection.java | 2 + .../opensearch/plugins/PluginsService.java | 35 ++++++++- .../plugins/PluginsServiceTests.java | 72 +++++++++++++++++++ 87 files changed, 367 insertions(+), 206 deletions(-) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/AddFileKeyStoreCommand.java (97%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/AddStringKeyStoreCommand.java (97%) rename {server/src/main/java/org/opensearch/common/settings => distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore}/BaseKeyStoreCommand.java (65%) rename {server/src/main/java/org/opensearch/common/settings => distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore}/ChangeKeyStorePasswordCommand.java (70%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/CreateKeyStoreCommand.java (96%) rename {server/src/main/java/org/opensearch/common/settings => distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore}/HasPasswordKeyStoreCommand.java (70%) rename {server/src/main/java/org/opensearch/cli => distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore}/KeyStoreAwareCommand.java (70%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/KeyStoreCli.java (95%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/ListKeyStoreCommand.java 
(95%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/RemoveSettingKeyStoreCommand.java (96%) rename {server/src/main/java/org/opensearch/common/settings => distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore}/UpgradeKeyStoreCommand.java (64%) rename distribution/tools/keystore-cli/src/main/java/org/opensearch/{common/settings => tools/cli/keystore}/package-info.java (87%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/AddFileKeyStoreCommandTests.java (99%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/AddStringKeyStoreCommandTests.java (99%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/ChangeKeyStorePasswordCommandTests.java (99%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/CreateKeyStoreCommandTests.java (97%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/HasPasswordKeyStoreCommandTests.java (98%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/KeyStoreCommandTestCase.java (97%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/KeyStoreWrapperTests.java (99%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/ListKeyStoreCommandTests.java (98%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/RemoveSettingKeyStoreCommandTests.java (99%) rename distribution/tools/keystore-cli/src/test/java/org/opensearch/{common/settings => tools/cli/keystore}/UpgradeKeyStoreCommandTests.java (97%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/InstallPluginCommand.java (98%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/ListPluginsCommand.java (94%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/PluginCli.java (95%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/PluginHelper.java (95%) rename {server/src/main/java/org/opensearch/plugins => distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin}/PluginSecurity.java (98%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/ProgressInputStream.java (98%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/RemovePluginCommand.java (94%) rename distribution/tools/plugin-cli/src/main/java/org/opensearch/{plugins => tools/cli/plugin}/package-info.java (87%) rename distribution/tools/plugin-cli/src/test/java/org/opensearch/{plugins => tools/cli/plugin}/InstallPluginCommandTests.java (99%) rename distribution/tools/plugin-cli/src/test/java/org/opensearch/{plugins => tools/cli/plugin}/ListPluginsCommandTests.java (98%) rename distribution/tools/plugin-cli/src/test/java/org/opensearch/{plugins => tools/cli/plugin}/ProgressInputStreamTests.java (98%) rename distribution/tools/plugin-cli/src/test/java/org/opensearch/{plugins => tools/cli/plugin}/RemovePluginCommandTests.java (99%) rename 
distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/DetectEsInstallationTask.java (99%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/ImportJvmOptionsTask.java (97%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/ImportKeystoreTask.java (96%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/ImportLog4jPropertiesTask.java (98%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/ImportYmlConfigTask.java (99%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/InstallPluginsTask.java (99%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{common/settings => tools/cli/upgrade}/KeystoreWrapperUtil.java (89%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/TaskInput.java (98%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/UpgradeCli.java (95%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/UpgradeTask.java (97%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{ => tools/cli}/upgrade/ValidateInputTask.java (98%) rename distribution/tools/upgrade-cli/src/main/java/org/opensearch/{common/settings => tools/cli/upgrade}/package-info.java (87%) delete mode 100644 distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/package-info.java rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/DetectEsInstallationTaskTests.java (98%) rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/ImportLog4jPropertiesTaskTests.java (98%) rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/ImportYmlConfigTaskTests.java (98%) rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/InstallPluginsTaskTests.java (98%) rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/UpgradeCliTests.java (99%) rename distribution/tools/upgrade-cli/src/test/java/org/opensearch/{ => tools/cli}/upgrade/ValidateInputTaskTests.java (98%) rename {server => libs/cli}/src/test/java/org/opensearch/cli/CommandTests.java (100%) rename {server => libs/cli}/src/test/java/org/opensearch/cli/MultiCommandTests.java (100%) rename {server => libs/cli}/src/test/java/org/opensearch/cli/TerminalTests.java (100%) rename libs/plugin-classloader/src/main/java/org/opensearch/{plugins => plugin/classloader}/ExtendedPluginsClassLoader.java (98%) rename qa/evil-tests/src/test/java/org/opensearch/{plugins => tools/cli/plugin}/PluginSecurityTests.java (92%) rename qa/evil-tests/src/test/resources/org/opensearch/{plugins/security => tools/cli/plugin}/complex-plugin-security.policy (100%) rename qa/evil-tests/src/test/resources/org/opensearch/{plugins/security => tools/cli/plugin}/simple-plugin-security.policy (100%) rename qa/evil-tests/src/test/resources/org/opensearch/{plugins/security => tools/cli/plugin}/unresolved-plugin-security.policy (100%) delete mode 100644 server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java rename server/src/main/java/org/opensearch/{ => common}/cli/CommandLoggingConfigurator.java (98%) rename server/src/main/java/org/opensearch/{ => common}/cli/EnvironmentAwareCommand.java (96%) rename server/src/main/java/org/opensearch/{ => 
common}/cli/LoggingAwareMultiCommand.java (96%) rename server/src/main/java/org/opensearch/{ => common}/cli/package-info.java (87%) diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index b02b0339603b0..40a1950ed1c78 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -29,7 +29,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add task completion count in search backpressure stats API ([#10028](https://github.com/opensearch-project/OpenSearch/pull/10028/)) - Deprecate CamelCase `PathHierarchy` tokenizer name in favor to lowercase `path_hierarchy` ([#10894](https://github.com/opensearch-project/OpenSearch/pull/10894)) - Breaking change: Do not request "search_pipelines" metrics by default in NodesInfoRequest ([#12497](https://github.com/opensearch-project/OpenSearch/pull/12497)) -- Refactor `:libs` module `bootstrap` package to eliminate top level split packages [#17117](https://github.com/opensearch-project/OpenSearch/pull/17117)) +- Refactor `:libs` module `bootstrap` package to eliminate top level split packages for JPMS support [#17117](https://github.com/opensearch-project/OpenSearch/pull/17117)) +- Refactor the codebase to eliminate top level split packages for JPMS support. [#17153](https://github.com/opensearch-project/OpenSearch/pull/17153)) ### Deprecated diff --git a/distribution/build.gradle b/distribution/build.gradle index b04b04062134f..8fe9a89059a50 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -302,7 +302,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { it.version = VersionProperties.getBundledJre(platform, architecture) it.vendor = VersionProperties.bundledJdkVendor it.architecture = architecture - } + } } } } @@ -481,7 +481,7 @@ configure(subprojects.findAll { ['archives', 'packages'].contains(it.name) }) { } } } - + jreFiles = { Project project, String platform, String architecture -> return copySpec { /* diff --git a/distribution/src/bin/opensearch-keystore b/distribution/src/bin/opensearch-keystore index 9f6cb65feeeeb..d7fad3006b037 100755 --- a/distribution/src/bin/opensearch-keystore +++ b/distribution/src/bin/opensearch-keystore @@ -2,7 +2,7 @@ set -e -o pipefail -OPENSEARCH_MAIN_CLASS=org.opensearch.common.settings.KeyStoreCli \ +OPENSEARCH_MAIN_CLASS=org.opensearch.tools.cli.keystore.KeyStoreCli \ OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/keystore-cli \ "`dirname "$0"`"/opensearch-cli \ "$@" diff --git a/distribution/src/bin/opensearch-keystore.bat b/distribution/src/bin/opensearch-keystore.bat index f6ab163199b77..de2c2168704a6 100644 --- a/distribution/src/bin/opensearch-keystore.bat +++ b/distribution/src/bin/opensearch-keystore.bat @@ -3,7 +3,7 @@ setlocal enabledelayedexpansion setlocal enableextensions -set OPENSEARCH_MAIN_CLASS=org.opensearch.common.settings.KeyStoreCli +set OPENSEARCH_MAIN_CLASS=org.opensearch.tools.cli.keystore.KeyStoreCli set OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/keystore-cli call "%~dp0opensearch-cli.bat" ^ %%* ^ diff --git a/distribution/src/bin/opensearch-plugin b/distribution/src/bin/opensearch-plugin index b58a2695d2ecf..984d573249afc 100755 --- a/distribution/src/bin/opensearch-plugin +++ b/distribution/src/bin/opensearch-plugin @@ -2,7 +2,7 @@ set -e -o pipefail -OPENSEARCH_MAIN_CLASS=org.opensearch.plugins.PluginCli \ +OPENSEARCH_MAIN_CLASS=org.opensearch.tools.cli.plugin.PluginCli \ OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli \ "`dirname "$0"`"/opensearch-cli \ "$@" diff --git 
a/distribution/src/bin/opensearch-plugin.bat b/distribution/src/bin/opensearch-plugin.bat index f1bd8d92aab4a..6864de9bc46d0 100644 --- a/distribution/src/bin/opensearch-plugin.bat +++ b/distribution/src/bin/opensearch-plugin.bat @@ -3,7 +3,7 @@ setlocal enabledelayedexpansion setlocal enableextensions -set OPENSEARCH_MAIN_CLASS=org.opensearch.plugins.PluginCli +set OPENSEARCH_MAIN_CLASS=org.opensearch.tools.cli.plugin.PluginCli set OPENSEARCH_ADDITIONAL_CLASSPATH_DIRECTORIES=lib/tools/plugin-cli call "%~dp0opensearch-cli.bat" ^ %%* ^ diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommand.java similarity index 97% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommand.java index b948be24350f4..c6725815efc4c 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddFileKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommand.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import joptsimple.OptionSpec; @@ -39,6 +39,7 @@ import org.opensearch.cli.UserException; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.nio.file.Files; diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommand.java similarity index 97% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommand.java index a8bc1dff8838f..74b03b86b88e0 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/AddStringKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommand.java @@ -30,7 +30,7 @@ * GitHub history for details. 
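The package moves above (org.opensearch.common.settings to org.opensearch.tools.cli.keystore, and so on through the rest of the patch) eliminate "split packages": the same Java package compiled into both the server jar and a CLI jar. The Java Platform Module System requires every package to belong to exactly one module, so a split package blocks any future adoption of module descriptors. A sketch of the constraint with invented module names; this patch itself adds no module-info files:

    // Hypothetical module-info.java for the server jar:
    module org.opensearch.server {
        exports org.opensearch.common.settings;
    }

    // Hypothetical module-info.java for the keystore CLI jar. If this jar also
    // contained classes in org.opensearch.common.settings, module resolution
    // would fail, because that package would belong to two modules at once.
    // After the refactor the CLI owns a distinct package instead:
    module org.opensearch.tools.cli.keystore {
        requires org.opensearch.server;
        exports org.opensearch.tools.cli.keystore;
    }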
*/
-package org.opensearch.common.settings;
+package org.opensearch.tools.cli.keystore;

 import joptsimple.OptionSet;
 import joptsimple.OptionSpec;
@@ -38,6 +38,7 @@
 import org.opensearch.cli.Terminal;
 import org.opensearch.cli.UserException;
 import org.opensearch.common.CheckedFunction;
+import org.opensearch.common.settings.KeyStoreWrapper;
 import org.opensearch.env.Environment;

 import java.io.BufferedReader;
diff --git a/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/BaseKeyStoreCommand.java
similarity index 65%
rename from server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java
rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/BaseKeyStoreCommand.java
index 73a2fc031fd7c..161cee9f94808 100644
--- a/server/src/main/java/org/opensearch/common/settings/BaseKeyStoreCommand.java
+++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/BaseKeyStoreCommand.java
@@ -30,14 +30,14 @@
 * GitHub history for details.
 */

-package org.opensearch.common.settings;
+package org.opensearch.tools.cli.keystore;

 import joptsimple.OptionSet;
 import joptsimple.OptionSpec;
 import org.opensearch.cli.ExitCodes;
-import org.opensearch.cli.KeyStoreAwareCommand;
 import org.opensearch.cli.Terminal;
 import org.opensearch.cli.UserException;
+import org.opensearch.common.settings.KeyStoreWrapper;
 import org.opensearch.core.common.settings.SecureString;
 import org.opensearch.env.Environment;

@@ -53,13 +53,36 @@ public abstract class BaseKeyStoreCommand extends KeyStoreAwareCommand {
 private KeyStoreWrapper keyStore;
 private SecureString keyStorePassword;
 private final boolean keyStoreMustExist;
- OptionSpec<Void> forceOption;
+ /**
+ * Option to force operations without prompting for confirmation.
+ * When specified, operations proceed without asking for user input.
+ */
+ protected OptionSpec<Void> forceOption;
+
+ /**
+ * Creates a new BaseKeyStoreCommand with the specified description and existence requirement.
+ *
+ * @param description The description of the command
+ * @param keyStoreMustExist If true, the keystore must exist before executing the command.
+ * If false, a new keystore may be created if none exists.
+ */
 public BaseKeyStoreCommand(String description, boolean keyStoreMustExist) {
 super(description);
 this.keyStoreMustExist = keyStoreMustExist;
 }

+ /**
+ * Executes the keystore command by loading/creating the keystore and handling password management.
+ * If the keystore doesn't exist and keyStoreMustExist is false, prompts to create a new one
+ * unless the force option is specified.
+ *
+ * @param terminal The terminal to use for user interaction
+ * @param options The command-line options provided
+ * @param env The environment settings
+ * @throws Exception if there are errors during keystore operations
+ * @throws UserException if the keystore is required but doesn't exist
+ */
 @Override
 protected final void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
 try {
@@ -96,19 +119,34 @@ protected final void execute(Terminal terminal, OptionSet options, Environment e
 }
 }

+ /**
+ * Gets the current keystore instance.
+ *
+ * @return The current {@link KeyStoreWrapper} instance being operated on
+ */
 protected KeyStoreWrapper getKeyStore() {
 return keyStore;
 }

+ /**
+ * Gets the password for the current keystore.
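The new javadoc above documents a template-method contract: execute() loads (or offers to create) and decrypts the keystore, then hands control to executeCommand(). A hypothetical subclass, not a command that exists in this patch, sketching how the hooks compose:

    import joptsimple.OptionSet;
    import org.opensearch.cli.Terminal;
    import org.opensearch.common.settings.KeyStoreWrapper;
    import org.opensearch.env.Environment;

    // Illustrative only; "count-settings" is an invented command.
    class CountSettingsCommand extends BaseKeyStoreCommand {
        CountSettingsCommand() {
            // true: require an existing keystore rather than offering to create one
            super("Prints the number of settings in the keystore", true);
        }

        @Override
        protected void executeCommand(Terminal terminal, OptionSet options, Environment env) throws Exception {
            // By the time this runs, the base class has already loaded and
            // decrypted the keystore.
            KeyStoreWrapper keyStore = getKeyStore();
            terminal.println("Settings in keystore: " + keyStore.getSettingNames().size());
        }
    }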
+ * + * @return The {@link SecureString} containing the keystore password + */ protected SecureString getKeyStorePassword() { return keyStorePassword; } /** - * This is called after the keystore password has been read from the stdin and the keystore is decrypted and - * loaded. The keystore and keystore passwords are available to classes extending {@link BaseKeyStoreCommand} - * using {@link BaseKeyStoreCommand#getKeyStore()} and {@link BaseKeyStoreCommand#getKeyStorePassword()} - * respectively. + * Executes the specific keystore command implementation. + * This is called after the keystore password has been read and the keystore + * is decrypted and loaded. The keystore and keystore passwords are available using + * {@link #getKeyStore()} and {@link #getKeyStorePassword()} respectively. + * + * @param terminal The terminal to use for user interaction + * @param options The command line options that were specified + * @param env The environment configuration + * @throws Exception if there is an error executing the command */ protected abstract void executeCommand(Terminal terminal, OptionSet options, Environment env) throws Exception; } diff --git a/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommand.java similarity index 70% rename from server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommand.java index 74e09f6f233d5..79c8a0a25b916 100644 --- a/server/src/main/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommand.java @@ -30,12 +30,13 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; @@ -44,12 +45,27 @@ * * @opensearch.internal */ -class ChangeKeyStorePasswordCommand extends BaseKeyStoreCommand { +public class ChangeKeyStorePasswordCommand extends BaseKeyStoreCommand { ChangeKeyStorePasswordCommand() { super("Changes the password of a keystore", true); } + /** + * Executes the password change command by prompting for a new password + * and saving the keystore with the updated password. + *
<p>
+ * This implementation will: + * 1. Prompt for a new password with verification + * 2. Save the keystore with the new password + * 3. Display a success message upon completion + * + * @param terminal The terminal to use for user interaction + * @param options The command-line options provided + * @param env The environment settings containing configuration directory + * @throws Exception if there are errors during password change + * @throws UserException if there are security-related errors + */ @Override protected void executeCommand(Terminal terminal, OptionSet options, Environment env) throws Exception { try (SecureString newPassword = readPassword(terminal, true)) { diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommand.java similarity index 96% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommand.java index a4ab98ce730ee..647b6aa6dbb85 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/CreateKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommand.java @@ -30,14 +30,14 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.opensearch.cli.ExitCodes; -import org.opensearch.cli.KeyStoreAwareCommand; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; diff --git a/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommand.java similarity index 70% rename from server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommand.java index 59d8a44846e11..079a78fc41b14 100644 --- a/server/src/main/java/org/opensearch/common/settings/HasPasswordKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommand.java @@ -30,18 +30,19 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; -import org.opensearch.cli.KeyStoreAwareCommand; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.nio.file.Path; /** - * KeyStore command that has a password. + * KeyStore command that checks if the keystore exists and is password-protected. + * Exits with a non-zero status code if the keystore is missing or not password-protected. * * @opensearch.internal */ @@ -49,12 +50,29 @@ public class HasPasswordKeyStoreCommand extends KeyStoreAwareCommand { static final int NO_PASSWORD_EXIT_CODE = 1; + /** + * Creates a new HasPasswordKeyStoreCommand. 
+ * This command checks for the existence of a password-protected keystore + * and exits with {@link #NO_PASSWORD_EXIT_CODE} if the keystore is missing + * or not password-protected. + */ HasPasswordKeyStoreCommand() { super( "Succeeds if the keystore exists and is password-protected, " + "fails with exit code " + NO_PASSWORD_EXIT_CODE + " otherwise." ); } + /** + * Executes the password check command by verifying if the keystore exists + * and is password-protected. + * + * @param terminal The terminal for user interaction and output + * @param options The command-line options provided + * @param env The environment settings containing configuration directory + * @throws UserException with {@link #NO_PASSWORD_EXIT_CODE} if the keystore + * is missing or not password-protected + * @throws Exception if there are other errors during execution + */ @Override protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { final Path configFile = env.configDir(); diff --git a/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreAwareCommand.java similarity index 70% rename from server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreAwareCommand.java index 6cd266252b369..2deec53c788dc 100644 --- a/server/src/main/java/org/opensearch/cli/KeyStoreAwareCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreAwareCommand.java @@ -30,9 +30,13 @@ * GitHub history for details. */ -package org.opensearch.cli; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; +import org.opensearch.cli.ExitCodes; +import org.opensearch.cli.Terminal; +import org.opensearch.cli.UserException; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; @@ -42,19 +46,22 @@ import java.util.Arrays; /** - * An {@link org.opensearch.cli.EnvironmentAwareCommand} that needs to access the opensearch keystore, possibly + * An {@link EnvironmentAwareCommand} that needs to access the opensearch keystore, possibly * decrypting it if it is password protected. * * @opensearch.internal */ public abstract class KeyStoreAwareCommand extends EnvironmentAwareCommand { + + /** + * Creates a new KeyStoreAwareCommand with the given description. + * + * @param description A description of the command's purpose and functionality + */ public KeyStoreAwareCommand(String description) { super(description); } - /** Arbitrarily chosen maximum passphrase length */ - public static final int MAX_PASSPHRASE_LENGTH = 128; - /** * Reads the keystore password from the {@link Terminal}, prompting for verification where applicable and returns it as a * {@link SecureString}. 
@@ -69,9 +76,9 @@ protected static SecureString readPassword(Terminal terminal, boolean withVerifi if (withVerification) { passwordArray = terminal.readSecret( "Enter new password for the opensearch keystore (empty for no password): ", - MAX_PASSPHRASE_LENGTH + EnvironmentAwareCommand.MAX_PASSPHRASE_LENGTH ); - char[] passwordVerification = terminal.readSecret("Enter same password again: ", MAX_PASSPHRASE_LENGTH); + char[] passwordVerification = terminal.readSecret("Enter same password again: ", EnvironmentAwareCommand.MAX_PASSPHRASE_LENGTH); if (Arrays.equals(passwordArray, passwordVerification) == false) { throw new UserException(ExitCodes.DATA_ERROR, "Passwords are not equal, exiting."); } @@ -83,7 +90,15 @@ protected static SecureString readPassword(Terminal terminal, boolean withVerifi } /** - * Decrypt the {@code keyStore}, prompting the user to enter the password in the {@link Terminal} if it is password protected + * Decrypts the provided keystore using a password obtained from the terminal. + * If the keystore is password-protected, prompts the user to enter the password. + * If not password-protected, uses an empty password. + * + * @param keyStore The keystore to decrypt + * @param terminal The terminal to use for password input + * @throws UserException If there is an error with the provided password + * @throws GeneralSecurityException If there is an error during decryption + * @throws IOException If there is an error reading from the terminal */ protected static void decryptKeyStore(KeyStoreWrapper keyStore, Terminal terminal) throws UserException, GeneralSecurityException, IOException { @@ -94,5 +109,13 @@ protected static void decryptKeyStore(KeyStoreWrapper keyStore, Terminal termina } } + /** + * Executes the keystore command with the given parameters. + * + * @param terminal The terminal to use for user interaction + * @param options The command-line options provided + * @param env The environment settings + * @throws Exception if there are any errors during execution + */ protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception; } diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreCli.java similarity index 95% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreCli.java index 7a772526cd66b..4c7807be6a859 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/KeyStoreCli.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/KeyStoreCli.java @@ -30,10 +30,10 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; -import org.opensearch.cli.LoggingAwareMultiCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.cli.LoggingAwareMultiCommand; /** * A CLI tool for managing secrets in the OpenSearch keystore. 
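Taken together, the javadoc above describes a two-step pattern for any subclass: load the keystore from the config directory, then decrypt it with a terminal-prompted password only when one is set. A sketch of that sequence from inside a KeyStoreAwareCommand subclass, with error handling elided:

    // Assumes `env` and `terminal` are the Environment and Terminal passed to execute().
    KeyStoreWrapper keyStore = KeyStoreWrapper.load(env.configDir());
    if (keyStore == null) {
        terminal.println("no keystore found under " + env.configDir());
    } else {
        // Prompts for the password only when the keystore is password-protected.
        decryptKeyStore(keyStore, terminal);
        terminal.println("loaded " + keyStore.getSettingNames().size() + " settings");
    }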
diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommand.java similarity index 95% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommand.java index 379b61efc5d32..0e53a03a3c880 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/ListKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommand.java @@ -30,10 +30,11 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import org.opensearch.cli.Terminal; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.util.ArrayList; diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommand.java similarity index 96% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommand.java index c57959117af15..aa05db0a9475b 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommand.java @@ -30,13 +30,14 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import joptsimple.OptionSpec; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.util.List; diff --git a/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommand.java similarity index 64% rename from server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommand.java index 46da933e3bf11..22dfbd7a48925 100644 --- a/server/src/main/java/org/opensearch/common/settings/UpgradeKeyStoreCommand.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommand.java @@ -30,10 +30,11 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import joptsimple.OptionSet; import org.opensearch.cli.Terminal; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; /** @@ -43,10 +44,26 @@ */ public class UpgradeKeyStoreCommand extends BaseKeyStoreCommand { + /** + * Creates a new UpgradeKeyStoreCommand instance. + * Initializes a command that requires an existing keystore to upgrade its format + * to the latest version. 
This command will fail if the keystore doesn't exist. + */ UpgradeKeyStoreCommand() { super("Upgrade the keystore format", true); } + /** + * Executes the keystore upgrade command by upgrading the format of an existing keystore. + * Uses the current keystore and its password to create a new keystore with an upgraded format + * in the same location. + * + * @param terminal The terminal for user interaction and output messages + * @param options The command-line options provided + * @param env The environment settings containing the configuration directory + * @throws Exception if there are any errors during the upgrade process, + * such as IO errors or encryption/decryption issues + */ @Override protected void executeCommand(final Terminal terminal, final OptionSet options, final Environment env) throws Exception { KeyStoreWrapper.upgrade(getKeyStore(), env.configDir(), getKeyStorePassword().getChars()); diff --git a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/package-info.java similarity index 87% rename from distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java rename to distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/package-info.java index 3969fb4f91e49..aa1a2075194fd 100644 --- a/distribution/tools/keystore-cli/src/main/java/org/opensearch/common/settings/package-info.java +++ b/distribution/tools/keystore-cli/src/main/java/org/opensearch/tools/cli/keystore/package-info.java @@ -9,4 +9,4 @@ /** * Classes implementing a CLI tool for managing secrets in the OpenSearch keystore. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java index e9219de218aef..149145bb2d66b 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/bootstrap/BootstrapTests.java @@ -31,7 +31,6 @@ package org.opensearch.bootstrap; -import org.opensearch.common.settings.KeyStoreCommandTestCase; import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.common.settings.SecureSettings; import org.opensearch.common.settings.Settings; @@ -39,6 +38,7 @@ import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.tools.cli.keystore.KeyStoreCommandTestCase; import org.junit.After; import org.junit.Before; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java similarity index 99% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java index b3cc7e10fdf8c..3d188590d5c47 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddFileKeyStoreCommandTests.java +++ 
b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddFileKeyStoreCommandTests.java @@ -30,12 +30,13 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.UserException; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.io.IOException; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java similarity index 99% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java index 059c74ed8971c..22012d1f44986 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/AddStringKeyStoreCommandTests.java @@ -30,11 +30,12 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.io.ByteArrayInputStream; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java similarity index 99% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java index 55dbf59c8ad86..1ce57332a9a31 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ChangeKeyStorePasswordCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ChangeKeyStorePasswordCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommandTests.java similarity index 97% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommandTests.java index f554f17c50813..5a06bc2400176 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/CreateKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/CreateKeyStoreCommandTests.java @@ -30,11 +30,12 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.nio.charset.StandardCharsets; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/HasPasswordKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommandTests.java similarity index 98% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/HasPasswordKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommandTests.java index 3b5abf30c4b85..9ebc92c55530b 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/HasPasswordKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/HasPasswordKeyStoreCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.UserException; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreCommandTestCase.java similarity index 97% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreCommandTestCase.java index 0ac653d35b07e..b50d1f9c20081 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreCommandTestCase.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreCommandTestCase.java @@ -30,13 +30,15 @@ * GitHub history for details. 
*/ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.cli.CommandTestCase; import org.opensearch.common.io.PathUtilsForTesting; +import org.opensearch.common.settings.KeyStoreWrapper; +import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java similarity index 99% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java index 15771945fa199..efb833e8fd94a 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/KeyStoreWrapperTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/KeyStoreWrapperTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.apache.lucene.backward_codecs.store.EndiannessReverserUtil; import org.apache.lucene.codecs.CodecUtil; @@ -39,6 +39,7 @@ import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.NIOFSDirectory; import org.opensearch.common.Randomness; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.settings.SecureString; import org.opensearch.env.Environment; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java similarity index 98% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java index 42452d5c12beb..0846e28fb42af 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/ListKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/ListKeyStoreCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java similarity index 99% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java index 5dab6eec121bd..66d448400d4e3 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/RemoveSettingKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/RemoveSettingKeyStoreCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.ExitCodes; diff --git a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommandTests.java similarity index 97% rename from distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java rename to distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommandTests.java index 0fda83282c1f9..e27762de77020 100644 --- a/distribution/tools/keystore-cli/src/test/java/org/opensearch/common/settings/UpgradeKeyStoreCommandTests.java +++ b/distribution/tools/keystore-cli/src/test/java/org/opensearch/tools/cli/keystore/UpgradeKeyStoreCommandTests.java @@ -30,10 +30,11 @@ * GitHub history for details. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.keystore; import org.opensearch.cli.Command; import org.opensearch.cli.UserException; +import org.opensearch.common.settings.KeyStoreWrapper; import org.opensearch.env.Environment; import java.io.InputStream; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java similarity index 98% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java index d5a0102ba86af..1ab2697d5ced8 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/InstallPluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/InstallPluginCommand.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/
-package org.opensearch.plugins;
+package org.opensearch.tools.cli.plugin;

 import joptsimple.OptionSet;
 import joptsimple.OptionSpec;
@@ -52,16 +52,19 @@
 import org.bouncycastle.openpgp.operator.jcajce.JcaPGPContentVerifierBuilderProvider;
 import org.opensearch.Build;
 import org.opensearch.Version;
-import org.opensearch.cli.EnvironmentAwareCommand;
 import org.opensearch.cli.ExitCodes;
 import org.opensearch.cli.Terminal;
 import org.opensearch.cli.UserException;
 import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.bootstrap.JarHell;
+import org.opensearch.common.cli.EnvironmentAwareCommand;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.hash.MessageDigests;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.env.Environment;
+import org.opensearch.plugins.Platforms;
+import org.opensearch.plugins.PluginInfo;
+import org.opensearch.plugins.PluginsService;

 import java.io.BufferedReader;
 import java.io.IOException;
@@ -92,7 +95,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Enumeration;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -827,17 +829,7 @@ void jarHellCheck(PluginInfo candidateInfo, Path candidateDir, Path pluginsDir,
 }).collect(Collectors.toSet());

 // read existing bundles. this does some checks on the installation too.
- Set<PluginsService.Bundle> bundles = new HashSet<>(PluginsService.getPluginBundles(pluginsDir));
- bundles.addAll(PluginsService.getModuleBundles(modulesDir));
- bundles.add(new PluginsService.Bundle(candidateInfo, candidateDir));
- List<PluginsService.Bundle> sortedBundles = PluginsService.sortBundles(bundles);
-
- // check jarhell of all plugins so we know this plugin and anything depending on it are ok together
- // TODO: optimize to skip any bundles not connected to the candidate plugin?
- Map<String, Set<URL>> transitiveUrls = new HashMap<>();
- for (PluginsService.Bundle bundle : sortedBundles) {
- PluginsService.checkBundleJarHell(classpath, bundle, transitiveUrls);
- }
+ PluginsService.checkJarHellForPlugin(classpath, candidateInfo, candidateDir, pluginsDir, modulesDir);

 // TODO: no jars should be an error
 // TODO: verify the classname exists in one of the jars!
diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ListPluginsCommand.java
similarity index 94%
rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java
rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ListPluginsCommand.java
index 9ca42ac5f4ec1..a54c02651549c 100644
--- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ListPluginsCommand.java
+++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ListPluginsCommand.java
@@ -30,13 +30,15 @@
 * GitHub history for details.
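The new PluginsService.checkJarHellForPlugin helper is not shown in this hunk; judging from the deleted lines, it consolidates the same bundle gathering and transitive jar-hell walk inside PluginsService. A sketch of what it plausibly looks like there, reconstructed from the removed code rather than copied from the patch:

    // Inside PluginsService; Bundle, getPluginBundles, getModuleBundles, sortBundles
    // and checkBundleJarHell are the existing members the deleted lines referenced.
    public static void checkJarHellForPlugin(
        Set<URL> classpath,
        PluginInfo candidateInfo,
        Path candidateDir,
        Path pluginsDir,
        Path modulesDir
    ) throws Exception {
        // Gather every installed bundle plus the candidate being installed.
        Set<Bundle> bundles = new HashSet<>(getPluginBundles(pluginsDir));
        bundles.addAll(getModuleBundles(modulesDir));
        bundles.add(new Bundle(candidateInfo, candidateDir));
        List<Bundle> sortedBundles = sortBundles(bundles);

        // Check jar hell of all plugins so the candidate and anything depending
        // on it are known to be compatible together.
        Map<String, Set<URL>> transitiveUrls = new HashMap<>();
        for (Bundle bundle : sortedBundles) {
            checkBundleJarHell(classpath, bundle, transitiveUrls);
        }
    }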
*/ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import joptsimple.OptionSet; import org.opensearch.Version; -import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.env.Environment; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.plugins.PluginsService; import java.io.IOException; import java.nio.file.DirectoryStream; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginCli.java similarity index 95% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginCli.java index f87b72c7ecb5f..929b8bfe59ddb 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginCli.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginCli.java @@ -30,11 +30,11 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.opensearch.cli.Command; -import org.opensearch.cli.LoggingAwareMultiCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.cli.LoggingAwareMultiCommand; import org.opensearch.common.util.io.IOUtils; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginHelper.java similarity index 95% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginHelper.java index 13d8ab62c1f8d..334f0f4f14924 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/PluginHelper.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginHelper.java @@ -6,7 +6,9 @@ * compatible open source license. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; + +import org.opensearch.plugins.PluginInfo; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginSecurity.java similarity index 98% rename from server/src/main/java/org/opensearch/plugins/PluginSecurity.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginSecurity.java index 1bf8642d1112f..81d7824812361 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginSecurity.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/PluginSecurity.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; @@ -136,7 +136,7 @@ static String formatPermission(Permission permission) { * Parses plugin policy into a set of permissions. Each permission is formatted for output to users. 
*/ @SuppressWarnings("removal") - public static Set parsePermissions(Path file, Path tmpDir) throws IOException { + static Set parsePermissions(Path file, Path tmpDir) throws IOException { // create a zero byte file for "comparison" // this is necessary because the default policy impl automatically grants two permissions: // 1. permission to exitVM (which we ignore) diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ProgressInputStream.java similarity index 98% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ProgressInputStream.java index 02be3dbc82a44..7f79ba3f4f4df 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/ProgressInputStream.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/ProgressInputStream.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import java.io.FilterInputStream; import java.io.IOException; diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/RemovePluginCommand.java similarity index 94% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/RemovePluginCommand.java index 4ac99c816717a..2478a05c3a9cb 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/RemovePluginCommand.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/RemovePluginCommand.java @@ -30,16 +30,17 @@ * GitHub history for details. 
*/ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import joptsimple.OptionSet; import joptsimple.OptionSpec; -import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; +import org.opensearch.plugins.PluginsService; import java.io.IOException; import java.nio.file.FileAlreadyExistsException; @@ -49,7 +50,6 @@ import java.util.Arrays; import java.util.List; import java.util.Locale; -import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -98,15 +98,7 @@ void execute(Terminal terminal, Environment env, String pluginName, boolean purg } // first make sure nothing extends this plugin - List<String> usedBy = new ArrayList<>(); - Set<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(env.pluginsDir()); - for (PluginsService.Bundle bundle : bundles) { - for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { - if (extendedPlugin.equals(pluginName)) { - usedBy.add(bundle.plugin.getName()); - } - } - } + List<String> usedBy = PluginsService.findPluginsByDependency(env.pluginsDir(), pluginName); if (usedBy.isEmpty() == false) { throw new UserException( PLUGIN_STILL_USED, diff --git a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/package-info.java similarity index 87% rename from distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java rename to distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/package-info.java index b762e59ae8095..b676f8db01a84 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/opensearch/plugins/package-info.java +++ b/distribution/tools/plugin-cli/src/main/java/org/opensearch/tools/cli/plugin/package-info.java @@ -9,4 +9,4 @@ /** * Classes implementing a CLI tool for managing plugins in OpenSearch. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java similarity index 99% rename from distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java rename to distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java index e6c1070f36066..56ef09c5c9128 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/InstallPluginCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details.
*/ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; @@ -70,6 +70,9 @@ import org.opensearch.core.util.FileSystemUtils; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.plugins.Platforms; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.plugins.PluginTestUtil; import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.PosixPermissionsResetter; diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java similarity index 98% rename from distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java rename to distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java index 6878efce4c804..7fcdab907cbf0 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ListPluginsCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.LegacyESVersion; @@ -41,6 +41,8 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.plugins.PluginTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.junit.Before; diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ProgressInputStreamTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ProgressInputStreamTests.java similarity index 98% rename from distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ProgressInputStreamTests.java rename to distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ProgressInputStreamTests.java index 13af003591cfa..d045c4256f08c 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/ProgressInputStreamTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/ProgressInputStreamTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.opensearch.test.OpenSearchTestCase; diff --git a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/RemovePluginCommandTests.java similarity index 99% rename from distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java rename to distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/RemovePluginCommandTests.java index ab23dfad75683..10d5a376d8289 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/opensearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/opensearch/tools/cli/plugin/RemovePluginCommandTests.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.Version; @@ -40,6 +40,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.TestEnvironment; +import org.opensearch.plugins.PluginTestUtil; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.junit.Before; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java similarity index 99% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java index 90067ffd221bf..de2d24dc61ce1 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/DetectEsInstallationTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import com.fasterxml.jackson.databind.ObjectMapper; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportJvmOptionsTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java similarity index 97% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportJvmOptionsTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java index 555c553578155..fcb23f02425c6 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportJvmOptionsTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportJvmOptionsTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportKeystoreTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java similarity index 96% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportKeystoreTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java index 5d35a7fcc0463..f2f36ab3bbe0e 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportKeystoreTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportKeystoreTask.java @@ -6,12 +6,11 @@ * compatible open source license. 
*/ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.KeyStoreWrapper; -import org.opensearch.common.settings.KeystoreWrapperUtil; import org.opensearch.core.common.settings.SecureString; import java.io.InputStream; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportLog4jPropertiesTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java similarity index 98% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportLog4jPropertiesTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java index 7934ba43ccfaa..da999a3f91f08 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportLog4jPropertiesTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java similarity index 99% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java index 6e29bd2d04239..2ba8f93cd53ac 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ImportYmlConfigTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/InstallPluginsTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java similarity index 99% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/InstallPluginsTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java index 781a56e4b53d9..ae2335752bed0 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/InstallPluginsTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/InstallPluginsTask.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/KeystoreWrapperUtil.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java similarity index 89% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/KeystoreWrapperUtil.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java index f9300b6555478..3ef7d09edc046 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/KeystoreWrapperUtil.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/KeystoreWrapperUtil.java @@ -6,7 +6,9 @@ * compatible open source license. */ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.upgrade; + +import org.opensearch.common.settings.KeyStoreWrapper; /** * Utility that has package level access to the {@link KeyStoreWrapper} for diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java similarity index 98% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java index 2fbd5d9a0fa7c..b3c5604275b54 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/TaskInput.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/TaskInput.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.Version; import org.opensearch.env.Environment; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeCli.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java similarity index 95% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeCli.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java index e625ad333d218..f609d06b8ed34 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeCli.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeCli.java @@ -6,13 +6,13 @@ * compatible open source license. 
*/ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import joptsimple.OptionSet; -import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.collect.Tuple; import org.opensearch.env.Environment; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java similarity index 97% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java index 708f644bcdeb6..8f84dd8c9817c 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/UpgradeTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/UpgradeTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.Terminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java similarity index 98% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java index 95e055cedda43..bea524e651827 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/ValidateInputTask.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/ValidateInputTask.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.LegacyESVersion; import org.opensearch.Version; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/package-info.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java similarity index 87% rename from distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/package-info.java rename to distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java index b3a4e9512639a..86ecef5ae5576 100644 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/common/settings/package-info.java +++ b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/tools/cli/upgrade/package-info.java @@ -9,4 +9,4 @@ /** * This exists to get access to the package level methods of KeyStoreWrapper. 
*/ -package org.opensearch.common.settings; +package org.opensearch.tools.cli.upgrade; diff --git a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/package-info.java b/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/package-info.java deleted file mode 100644 index d66b1c9503d9b..0000000000000 --- a/distribution/tools/upgrade-cli/src/main/java/org/opensearch/upgrade/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/** - * This package contains the classes for the upgrade CLI tool. - * This tool automates the configuring of a node that is to be upgraded to - * OpenSearch from an existing Elasticsearch (v7.10.2 and v6.8.0) installation. - */ - -/** - * Contains the classes which implement a CLI tool for opensearch-upgrade which is - * bundled into the distribution and available inside $ES_HOME/bin. - */ -package org.opensearch.upgrade; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java similarity index 98% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java index a1391ba70a8e8..e653b3fa8d284 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/DetectEsInstallationTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/DetectEsInstallationTaskTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.MockTerminal; import org.opensearch.cli.Terminal; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java similarity index 98% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java index 529253c9ce824..8136504b15462 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportLog4jPropertiesTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportLog4jPropertiesTaskTests.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java similarity index 98% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java index be03470b201a1..38dfebce7f21b 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ImportYmlConfigTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ImportYmlConfigTaskTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java similarity index 98% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java index 46e189a4765d0..e7ac68331a8ca 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/InstallPluginsTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/InstallPluginsTaskTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.cli.MockTerminal; import org.opensearch.common.collect.Tuple; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java similarity index 99% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java index a139480c71a2f..c1d1ca55ca315 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/UpgradeCliTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/UpgradeCliTests.java @@ -6,7 +6,7 @@ * compatible open source license. 
*/ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import com.google.common.jimfs.Configuration; import com.google.common.jimfs.Jimfs; diff --git a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java similarity index 98% rename from distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java rename to distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java index b9a536afb1361..91d708984deb4 100644 --- a/distribution/tools/upgrade-cli/src/test/java/org/opensearch/upgrade/ValidateInputTaskTests.java +++ b/distribution/tools/upgrade-cli/src/test/java/org/opensearch/tools/cli/upgrade/ValidateInputTaskTests.java @@ -6,7 +6,7 @@ * compatible open source license. */ -package org.opensearch.upgrade; +package org.opensearch.tools.cli.upgrade; import org.opensearch.LegacyESVersion; import org.opensearch.cli.MockTerminal; diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index a58c4dafac874..2e5d84a0cab0b 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -33,11 +33,11 @@ apply plugin: 'opensearch.publish' dependencies { api 'net.sf.jopt-simple:jopt-simple:5.0.4' api project(':libs:opensearch-common') -} -test.enabled = false -// Since CLI does not depend on :server, it cannot run the jarHell task -jarHell.enabled = false + testImplementation(project(":test:framework")) { + exclude group: 'org.opensearch', module: 'opensearch-cli' + } +} tasks.named('forbiddenApisMain').configure { replaceSignatureFiles 'jdk-signatures' diff --git a/libs/cli/src/main/java/org/opensearch/cli/Command.java b/libs/cli/src/main/java/org/opensearch/cli/Command.java index cc9230bdb2282..c2ae43946a8c8 100644 --- a/libs/cli/src/main/java/org/opensearch/cli/Command.java +++ b/libs/cli/src/main/java/org/opensearch/cli/Command.java @@ -119,7 +119,7 @@ public final int main(String[] args, Terminal terminal) throws Exception { /** * Executes the command, but all errors are thrown. 
*/ - void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { + protected void mainWithoutErrorHandling(String[] args, Terminal terminal) throws Exception { final OptionSet options = parser.parse(args); if (options.has(helpOption)) { diff --git a/server/src/test/java/org/opensearch/cli/CommandTests.java b/libs/cli/src/test/java/org/opensearch/cli/CommandTests.java similarity index 100% rename from server/src/test/java/org/opensearch/cli/CommandTests.java rename to libs/cli/src/test/java/org/opensearch/cli/CommandTests.java diff --git a/server/src/test/java/org/opensearch/cli/MultiCommandTests.java b/libs/cli/src/test/java/org/opensearch/cli/MultiCommandTests.java similarity index 100% rename from server/src/test/java/org/opensearch/cli/MultiCommandTests.java rename to libs/cli/src/test/java/org/opensearch/cli/MultiCommandTests.java diff --git a/server/src/test/java/org/opensearch/cli/TerminalTests.java b/libs/cli/src/test/java/org/opensearch/cli/TerminalTests.java similarity index 100% rename from server/src/test/java/org/opensearch/cli/TerminalTests.java rename to libs/cli/src/test/java/org/opensearch/cli/TerminalTests.java diff --git a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java b/libs/plugin-classloader/src/main/java/org/opensearch/plugin/classloader/ExtendedPluginsClassLoader.java similarity index 98% rename from libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java rename to libs/plugin-classloader/src/main/java/org/opensearch/plugin/classloader/ExtendedPluginsClassLoader.java index 969fa91b50538..217acb0d56db1 100644 --- a/libs/plugin-classloader/src/main/java/org/opensearch/plugins/ExtendedPluginsClassLoader.java +++ b/libs/plugin-classloader/src/main/java/org/opensearch/plugin/classloader/ExtendedPluginsClassLoader.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.plugin.classloader; import java.security.AccessController; import java.security.PrivilegedAction; diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle index 91cb4c7151a69..681ca0c712bb2 100644 --- a/qa/evil-tests/build.gradle +++ b/qa/evil-tests/build.gradle @@ -41,6 +41,7 @@ apply plugin: 'opensearch.standalone-test' dependencies { testImplementation 'com.google.jimfs:jimfs:1.3.0' + testImplementation project(':distribution:tools:plugin-cli') } // TODO: give each evil test its own fresh JVM for more isolation. 
diff --git a/qa/evil-tests/src/test/java/org/opensearch/cli/EvilEnvironmentAwareCommandTests.java b/qa/evil-tests/src/test/java/org/opensearch/cli/EvilEnvironmentAwareCommandTests.java index 2d232e9694de6..b4164d40081f2 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/cli/EvilEnvironmentAwareCommandTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/cli/EvilEnvironmentAwareCommandTests.java @@ -35,6 +35,7 @@ import joptsimple.OptionSet; import org.apache.lucene.tests.util.TestRuleRestoreSystemProperties; import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.env.Environment; import org.opensearch.test.OpenSearchTestCase; import org.junit.Rule; diff --git a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java b/qa/evil-tests/src/test/java/org/opensearch/tools/cli/plugin/PluginSecurityTests.java similarity index 92% rename from qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java rename to qa/evil-tests/src/test/java/org/opensearch/tools/cli/plugin/PluginSecurityTests.java index 04eae95f6fe12..fc210d2ba152d 100644 --- a/qa/evil-tests/src/test/java/org/opensearch/plugins/PluginSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/opensearch/tools/cli/plugin/PluginSecurityTests.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.plugins; +package org.opensearch.tools.cli.plugin; import org.opensearch.test.OpenSearchTestCase; @@ -50,7 +50,7 @@ public void testParsePermissions() throws Exception { "test cannot run with security manager enabled", System.getSecurityManager() == null); Path scratch = createTempDir(); - Path testFile = this.getDataPath("security/simple-plugin-security.policy"); + Path testFile = this.getDataPath("simple-plugin-security.policy"); Set<String> actual = PluginSecurity.parsePermissions(testFile, scratch); assertThat(actual, contains(PluginSecurity.formatPermission(new RuntimePermission("queuePrintJob")))); } @@ -61,7 +61,7 @@ public void testParseTwoPermissions() throws Exception { "test cannot run with security manager enabled", System.getSecurityManager() == null); Path scratch = createTempDir(); - Path testFile = this.getDataPath("security/complex-plugin-security.policy"); + Path testFile = this.getDataPath("complex-plugin-security.policy"); Set<String> actual = PluginSecurity.parsePermissions(testFile, scratch); assertThat(actual, containsInAnyOrder( PluginSecurity.formatPermission(new RuntimePermission("getClassLoader")), @@ -81,7 +81,7 @@ public void testFormatUnresolvedPermission() throws Exception { "test cannot run with security manager enabled", System.getSecurityManager() == null); Path scratch = createTempDir(); - Path testFile = this.getDataPath("security/unresolved-plugin-security.policy"); + Path testFile = this.getDataPath("unresolved-plugin-security.policy"); Set<String> permissions = PluginSecurity.parsePermissions(testFile, scratch); assertThat(permissions, contains("org.fake.FakePermission fakeName")); } diff --git a/qa/evil-tests/src/test/resources/org/opensearch/plugins/security/complex-plugin-security.policy b/qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/complex-plugin-security.policy similarity index 100% rename from qa/evil-tests/src/test/resources/org/opensearch/plugins/security/complex-plugin-security.policy rename to qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/complex-plugin-security.policy diff --git 
a/qa/evil-tests/src/test/resources/org/opensearch/plugins/security/simple-plugin-security.policy b/qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/simple-plugin-security.policy similarity index 100% rename from qa/evil-tests/src/test/resources/org/opensearch/plugins/security/simple-plugin-security.policy rename to qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/simple-plugin-security.policy diff --git a/qa/evil-tests/src/test/resources/org/opensearch/plugins/security/unresolved-plugin-security.policy b/qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/unresolved-plugin-security.policy similarity index 100% rename from qa/evil-tests/src/test/resources/org/opensearch/plugins/security/unresolved-plugin-security.policy rename to qa/evil-tests/src/test/resources/org/opensearch/tools/cli/plugin/unresolved-plugin-security.policy diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java index 14346b8910c76..3936a142945a4 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/IdentityAuthenticationIT.java @@ -17,7 +17,6 @@ import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.identity.shiro.ShiroIdentityPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.core.rest.RestStatus; diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 95498f2bcbcd1..b40ac67b04917 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -42,12 +42,12 @@ import org.apache.lucene.util.StringHelper; import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.cli.KeyStoreAwareCommand; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; import org.opensearch.common.PidFile; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.bootstrap.JarHell; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.inject.CreationException; import org.opensearch.common.logging.LogConfigurator; import org.opensearch.common.logging.Loggers; @@ -263,7 +263,7 @@ static SecureSettings loadSecureSettings(Environment initialEnv) throws Bootstra SecureString password; try { if (keystore != null && keystore.hasPassword()) { - password = readPassphrase(System.in, KeyStoreAwareCommand.MAX_PASSPHRASE_LENGTH); + password = readPassphrase(System.in, EnvironmentAwareCommand.MAX_PASSPHRASE_LENGTH); } else { password = new SecureString(new char[0]); } diff --git a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java index 8eb4f841b9671..162b9be318cd5 100644 --- a/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java +++ b/server/src/main/java/org/opensearch/bootstrap/OpenSearch.java @@ -37,10 +37,10 @@ import joptsimple.OptionSpecBuilder; import joptsimple.util.PathConverter; import org.opensearch.Build; -import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.ExitCodes; import org.opensearch.cli.Terminal; import 
org.opensearch.cli.UserException; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.logging.LogConfigurator; import org.opensearch.env.Environment; import org.opensearch.monitor.jvm.JvmInfo; diff --git a/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java b/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java deleted file mode 100644 index 07a6b7d523a33..0000000000000 --- a/server/src/main/java/org/opensearch/cli/LoggingAwareCommand.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.cli; - -/** - * A command that is aware of logging. This class should be preferred over the base {@link Command} class for any CLI tools that depend on - * core OpenSearch as they could directly or indirectly touch classes that touch logging and as such logging needs to be configured. - * - * @opensearch.internal - */ -public abstract class LoggingAwareCommand extends Command { - - /** - * Construct the command with the specified command description. This command will have logging configured without reading OpenSearch - * configuration files. 
- * - * @param description the command description - */ - public LoggingAwareCommand(final String description) { - super(description, CommandLoggingConfigurator::configureLoggingWithoutConfig); - } - -} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java index 7d51002150fb9..f895bb07d5b87 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/NodeToolCli.java @@ -31,9 +31,9 @@ package org.opensearch.cluster.coordination; -import org.opensearch.cli.CommandLoggingConfigurator; import org.opensearch.cli.MultiCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.cli.CommandLoggingConfigurator; import org.opensearch.env.NodeRepurposeCommand; import org.opensearch.env.OverrideNodeVersionCommand; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java index 896fe6fc8024b..92b88cab2dd70 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/OpenSearchNodeCommand.java @@ -40,7 +40,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.indices.rollover.Condition; -import org.opensearch.cli.EnvironmentAwareCommand; import org.opensearch.cli.Terminal; import org.opensearch.cli.UserException; import org.opensearch.cluster.ClusterModule; @@ -50,6 +49,7 @@ import org.opensearch.cluster.metadata.ComponentTemplateMetadata; import org.opensearch.cluster.metadata.DataStreamMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.common.cli.EnvironmentAwareCommand; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java b/server/src/main/java/org/opensearch/common/cli/CommandLoggingConfigurator.java similarity index 98% rename from server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java rename to server/src/main/java/org/opensearch/common/cli/CommandLoggingConfigurator.java index 8918725472160..a25cfb7de3b66 100644 --- a/server/src/main/java/org/opensearch/cli/CommandLoggingConfigurator.java +++ b/server/src/main/java/org/opensearch/common/cli/CommandLoggingConfigurator.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.cli; +package org.opensearch.common.cli; import org.apache.logging.log4j.Level; import org.opensearch.common.logging.LogConfigurator; diff --git a/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java b/server/src/main/java/org/opensearch/common/cli/EnvironmentAwareCommand.java similarity index 96% rename from server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java rename to server/src/main/java/org/opensearch/common/cli/EnvironmentAwareCommand.java index 10c59ef673050..a2db1d275c24b 100644 --- a/server/src/main/java/org/opensearch/cli/EnvironmentAwareCommand.java +++ b/server/src/main/java/org/opensearch/common/cli/EnvironmentAwareCommand.java @@ -30,11 +30,15 @@ * GitHub history for details. 
*/ -package org.opensearch.cli; +package org.opensearch.common.cli; import joptsimple.OptionSet; import joptsimple.OptionSpec; import joptsimple.util.KeyValuePair; +import org.opensearch.cli.Command; +import org.opensearch.cli.ExitCodes; +import org.opensearch.cli.Terminal; +import org.opensearch.cli.UserException; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; @@ -53,6 +57,8 @@ */ public abstract class EnvironmentAwareCommand extends Command { + public static final int MAX_PASSPHRASE_LENGTH = 128; + private final OptionSpec<KeyValuePair> settingOption; /** diff --git a/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java b/server/src/main/java/org/opensearch/common/cli/LoggingAwareMultiCommand.java similarity index 96% rename from server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java rename to server/src/main/java/org/opensearch/common/cli/LoggingAwareMultiCommand.java index 8453ed05a12d3..ff46e44b6faa5 100644 --- a/server/src/main/java/org/opensearch/cli/LoggingAwareMultiCommand.java +++ b/server/src/main/java/org/opensearch/common/cli/LoggingAwareMultiCommand.java @@ -30,7 +30,9 @@ * GitHub history for details. */ -package org.opensearch.cli; +package org.opensearch.common.cli; + +import org.opensearch.cli.MultiCommand; /** * A multi-command that is aware of logging. This class should be preferred over the base {@link MultiCommand} class for any CLI tools that diff --git a/server/src/main/java/org/opensearch/cli/package-info.java b/server/src/main/java/org/opensearch/common/cli/package-info.java similarity index 87% rename from server/src/main/java/org/opensearch/cli/package-info.java rename to server/src/main/java/org/opensearch/common/cli/package-info.java index 8e9f9f6360870..3150d87f323dc 100644 --- a/server/src/main/java/org/opensearch/cli/package-info.java +++ b/server/src/main/java/org/opensearch/common/cli/package-info.java @@ -9,4 +9,4 @@ /** * The command line interface module. */ -package org.opensearch.cli; +package org.opensearch.common.cli; diff --git a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java index 1ad3b7ab8875a..ed58e6b21e165 100644 --- a/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/opensearch/common/settings/KeyStoreWrapper.java @@ -134,7 +134,7 @@ private static class Entry { private static final String KEYSTORE_FILENAME = "opensearch.keystore"; /** The version of the metadata written before the keystore data. */ - static final int FORMAT_VERSION = 4; + public static final int FORMAT_VERSION = 4; /** The oldest metadata format version that can be read. */ private static final int MIN_FORMAT_VERSION = 1; @@ -631,7 +631,7 @@ public static void validateSettingName(String setting) { /** * Set a string setting. */ - synchronized void setString(String setting, char[] value) { + public synchronized void setString(String setting, char[] value) { ensureOpen(); validateSettingName(setting); @@ -646,7 +646,7 @@ synchronized void setString(String setting, char[] value) { /** * Set a file setting. */ - synchronized void setFile(String setting, byte[] bytes) { + public synchronized void setFile(String setting, byte[] bytes) { ensureOpen(); validateSettingName(setting); @@ -659,7 +659,7 @@ synchronized void setFile(String setting, byte[] bytes) { /** * Remove the given setting from the keystore.
*/ - void remove(String setting) { + public void remove(String setting) { ensureOpen(); Entry oldEntry = entries.get().remove(setting); if (oldEntry != null) { diff --git a/server/src/main/java/org/opensearch/index/shard/ShardToolCli.java b/server/src/main/java/org/opensearch/index/shard/ShardToolCli.java index 1b4eb5a3f8c39..04b26904565cc 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardToolCli.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardToolCli.java @@ -31,8 +31,8 @@ package org.opensearch.index.shard; -import org.opensearch.cli.LoggingAwareMultiCommand; import org.opensearch.cli.Terminal; +import org.opensearch.common.cli.LoggingAwareMultiCommand; /** * Class encapsulating and dispatching commands from the {@code opensearch-shard} command line tool diff --git a/server/src/main/java/org/opensearch/plugins/PluginLoaderIndirection.java b/server/src/main/java/org/opensearch/plugins/PluginLoaderIndirection.java index 199784d1f7630..8e28100306373 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginLoaderIndirection.java +++ b/server/src/main/java/org/opensearch/plugins/PluginLoaderIndirection.java @@ -32,6 +32,8 @@ package org.opensearch.plugins; +import org.opensearch.plugin.classloader.ExtendedPluginsClassLoader; + import java.util.List; /** diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index 72b8ada94a0d1..e5f9fbc483331 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -356,6 +356,37 @@ public int hashCode() { } } + public static void checkJarHellForPlugin( + Set<URL> classpath, + PluginInfo candidateInfo, + Path candidateDir, + Path pluginsDir, + Path modulesDir + ) throws Exception { + Set<Bundle> bundles = new HashSet<>(getPluginBundles(pluginsDir)); + bundles.addAll(getModuleBundles(modulesDir)); + bundles.add(new Bundle(candidateInfo, candidateDir)); + + List<Bundle> sortedBundles = sortBundles(bundles); + Map<String, Set<URL>> transitiveUrls = new HashMap<>(); + for (Bundle bundle : sortedBundles) { + checkBundleJarHell(classpath, bundle, transitiveUrls); + } + } + + public static List<String> findPluginsByDependency(Path pluginsDir, String pluginName) throws IOException { + List<String> usedBy = new ArrayList<>(); + Set<Bundle> bundles = getPluginBundles(pluginsDir); + for (Bundle bundle : bundles) { + for (String extendedPlugin : bundle.plugin.getExtendedPlugins()) { + if (extendedPlugin.equals(pluginName)) { + usedBy.add(bundle.plugin.getName()); + } + } + } + return usedBy; + } + /** * Extracts all installed plugin directories from the provided {@code rootPath}. * @@ -388,7 +419,7 @@ public static List<Path> findPluginDirs(final Path rootPath) throws IOException /** * Verify the given plugin is compatible with the current OpenSearch installation. */ - static void verifyCompatibility(PluginInfo info) { + public static void verifyCompatibility(PluginInfo info) { if (!isPluginVersionCompatible(info, Version.CURRENT)) { throw new IllegalArgumentException( "Plugin [" + @@ -413,7 +444,7 @@ public static boolean isPluginVersionCompatible(final PluginInfo pluginInfo, fin return true; } - static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { + public static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException { /* * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the * plugin.
diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index cb549eafc0d21..e9dc2207510e0 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -1130,6 +1130,78 @@ public void testPluginCompatibilityWithSemverRange() { assertFalse(PluginsService.isPluginVersionCompatible(getPluginInfoWithWithSemverRange("~1.0.0"), Version.fromString("1.1.0"))); } + public void testFindPluginsByDependency() throws Exception { + Path tempDir = createTempDir(); + Path pluginsDir = tempDir.resolve("plugins"); + Files.createDirectories(pluginsDir); + + Path plugin1Dir = pluginsDir.resolve("plugin1"); + PluginTestUtil.writePluginProperties( + plugin1Dir, + "description", + "Plugin 1", + "name", + "plugin1", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + "1.8", + "classname", + "Plugin1", + "extended.plugins", + "base-plugin" + ); + + Path plugin2Dir = pluginsDir.resolve("plugin2"); + PluginTestUtil.writePluginProperties( + plugin2Dir, + "description", + "Plugin 2", + "name", + "plugin2", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + "1.8", + "classname", + "Plugin2", + "extended.plugins", + "base-plugin,other-plugin" + ); + + Path plugin3Dir = pluginsDir.resolve("plugin3"); + PluginTestUtil.writePluginProperties( + plugin3Dir, + "description", + "Plugin 3", + "name", + "plugin3", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + "1.8", + "classname", + "Plugin3", + "extended.plugins", + "other-plugin" + ); + + List<String> basePluginDependents = PluginsService.findPluginsByDependency(pluginsDir, "base-plugin"); + assertThat(basePluginDependents, containsInAnyOrder("plugin1", "plugin2")); + + List<String> otherPluginDependents = PluginsService.findPluginsByDependency(pluginsDir, "other-plugin"); + assertThat(otherPluginDependents, containsInAnyOrder("plugin2", "plugin3")); + + List<String> nonExistentDependents = PluginsService.findPluginsByDependency(pluginsDir, "non-existent"); + assertTrue(nonExistentDependents.isEmpty()); + } + private PluginInfo getPluginInfoWithWithSemverRange(String semverRange) { return new PluginInfo( "my_plugin", From 2847695ad1fcc04c88b96c8bab0bfdf694fa05dc Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Wed, 29 Jan 2025 19:09:36 -0800 Subject: [PATCH 37/48] Unify precomputation of aggregations behind a common API (#16733) * Unify precomputation of aggregations behind a common API We've had a series of aggregation speedups that use the same strategy: instead of iterating through documents that match the query one-by-one, we can look at a Lucene segment and compute the aggregation directly (if some particular conditions are met). In every case, we've hooked that into custom logic that hijacks the getLeafCollector method and throws CollectionTerminatedException. This creates the illusion that we're implementing a custom LeafCollector, when really we're not collecting at all (which is the whole point). With this refactoring, the mechanism (hijacking getLeafCollector) is moved into AggregatorBase. Aggregators that have a strategy to precompute their answer can override tryPrecomputeAggregationForLeaf, which is expected to return true if they managed to precompute.
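For illustration, a minimal sketch of what an opted-in aggregator might look like under the new hook (the guard and helper methods below are hypothetical, invented for this sketch rather than taken from the change; only the overridden signature comes from the patch):

    @Override
    protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException {
        // Hypothetical guard: each aggregator decides when precomputation is safe.
        if (valuesSource == null || canPrecomputeFromSegment(ctx) == false) {
            return false; // state left untouched; normal collection runs for this leaf
        }
        // Update the aggregator's state exactly as collecting every matching doc would.
        addCountsFromSegmentMetadata(ctx);
        return true; // AggregatorBase then throws CollectionTerminatedException for us
    }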
This should also make it easier to keep track of which aggregations have precomputation approaches (since they override this method). Signed-off-by: Michael Froh * Remove subaggregator check from CompositeAggregator Not sure why I added this, when the existing implementation didn't have it. That said, we *should* call finishLeaf() before precomputing the current leaf. Signed-off-by: Michael Froh * Resolve conflicts with star-tree changes Signed-off-by: Michael Froh * Skip precomputation when valuesSource is null Signed-off-by: Michael Froh * Add comment as suggested by @bowenlan-amzn Signed-off-by: Michael Froh --------- Signed-off-by: Michael Froh --- .../search/aggregations/AggregatorBase.java | 23 +++++- .../bucket/composite/CompositeAggregator.java | 9 ++- .../histogram/DateHistogramAggregator.java | 22 +++--- .../bucket/range/RangeAggregator.java | 12 ++-- .../GlobalOrdinalsStringTermsAggregator.java | 72 +++++++++---------- .../aggregations/metrics/AvgAggregator.java | 28 ++++---- .../aggregations/metrics/MaxAggregator.java | 36 +++++----- .../aggregations/metrics/MinAggregator.java | 36 ++++++---- .../aggregations/metrics/SumAggregator.java | 24 ++++--- .../metrics/ValueCountAggregator.java | 29 ++++---- .../search/startree/StarTreeQueryHelper.java | 9 +-- 11 files changed, 168 insertions(+), 132 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java index 47e9def094623..f91bf972a3d28 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java +++ b/server/src/main/java/org/opensearch/search/aggregations/AggregatorBase.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.ScoreMode; import org.opensearch.core.common.breaker.CircuitBreaker; @@ -200,6 +201,9 @@ public Map metadata() { @Override public final LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { + if (tryPrecomputeAggregationForLeaf(ctx)) { + throw new CollectionTerminatedException(); + } preGetSubLeafCollectors(ctx); final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); return getLeafCollector(ctx, sub); @@ -216,6 +220,21 @@ protected void preGetSubLeafCollectors(LeafReaderContext ctx) throws IOException */ protected void doPreCollection() throws IOException {} + /** + * Subclasses may override this method if they have an efficient way of computing their aggregation for the given + * segment (versus collecting matching documents). If this method returns true, collection for the given segment + * will be terminated, rather than executing normally. + *

+ * If this method returns true, the aggregator's state should be identical to what it would be if matching + * documents from the segment were fully collected. If this method returns false, the aggregator's state should + * be unchanged from before this method is called. + * @param ctx the context for the given segment + * @return true if and only if results for this segment have been precomputed + */ + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + return false; + } + @Override public final void preCollection() throws IOException { List collectors = Arrays.asList(subAggregators); @@ -251,8 +270,8 @@ public Aggregator[] subAggregators() { public Aggregator subAggregator(String aggName) { if (subAggregatorbyName == null) { subAggregatorbyName = new HashMap<>(subAggregators.length); - for (int i = 0; i < subAggregators.length; i++) { - subAggregatorbyName.put(subAggregators[i].name(), subAggregators[i]); + for (Aggregator subAggregator : subAggregators) { + subAggregatorbyName.put(subAggregator.name(), subAggregator); } } return subAggregatorbyName.get(aggName); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java index 7f5e23a3307ed..fcf2a40dada14 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeAggregator.java @@ -564,10 +564,13 @@ private void processLeafFromQuery(LeafReaderContext ctx, Sort indexSortPrefix) t } @Override - protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { - boolean optimized = filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); - if (optimized) throw new CollectionTerminatedException(); + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + finishLeaf(); // May need to wrap up previous leaf if it could not be precomputed + return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); + } + @Override + protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { finishLeaf(); boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR; diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 451b96dc3cf9c..ce6a2dc8ebe46 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -33,7 +33,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; @@ -187,22 +186,23 @@ public ScoreMode scoreMode() { } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { - if (valuesSource == null) { - return 
LeafBucketCollector.NO_OP_COLLECTOR; - } - - boolean optimized = filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); - if (optimized) throw new CollectionTerminatedException(); - - SortedNumericDocValues values = valuesSource.longValues(ctx); + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (preComputeWithStarTree(ctx, supportedStarTree) == true) { - throw new CollectionTerminatedException(); + return true; } } + return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, segmentMatchAll(context, ctx)); + } + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + + SortedNumericDocValues values = valuesSource.longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @Override public void collect(int doc, long owningBucketOrd) throws IOException { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java index 1b6e0fe8c8d3f..c7303011b5800 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/range/RangeAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.range; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.ScoreMode; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; @@ -310,10 +309,15 @@ public ScoreMode scoreMode() { } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - if (segmentMatchAll(context, ctx) && filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, false)) { - throw new CollectionTerminatedException(); + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + if (segmentMatchAll(context, ctx)) { + return filterRewriteOptimizationContext.tryOptimize(ctx, this::incrementBucketDocCount, false); } + return false; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); return new LeafBucketCollectorBase(sub, values) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java index 9dbc97f7d2cb6..ef925b7f6416a 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java @@ -40,7 +40,6 @@ import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; -import org.apache.lucene.search.CollectionTerminatedException; import 
org.apache.lucene.search.Weight; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.BytesRef; @@ -165,35 +164,32 @@ public void setWeight(Weight weight) { @return A LeafBucketCollector implementation with collection termination, since collection is complete @throws IOException If an I/O error occurs during reading */ - LeafBucketCollector termDocFreqCollector( - LeafReaderContext ctx, - SortedSetDocValues globalOrds, - BiConsumer ordCountConsumer - ) throws IOException { + boolean tryCollectFromTermFrequencies(LeafReaderContext ctx, SortedSetDocValues globalOrds, BiConsumer ordCountConsumer) + throws IOException { if (weight == null) { // Weight not assigned - cannot use this optimization - return null; + return false; } else { if (weight.count(ctx) == 0) { // No documents matches top level query on this segment, we can skip the segment entirely - return LeafBucketCollector.NO_OP_COLLECTOR; + return true; } else if (weight.count(ctx) != ctx.reader().maxDoc()) { // weight.count(ctx) == ctx.reader().maxDoc() implies there are no deleted documents and // top-level query matches all docs in the segment - return null; + return false; } } Terms segmentTerms = ctx.reader().terms(this.fieldName); if (segmentTerms == null) { // Field is not indexed. - return null; + return false; } NumericDocValues docCountValues = DocValues.getNumeric(ctx.reader(), DocCountFieldMapper.NAME); if (docCountValues.nextDoc() != NO_MORE_DOCS) { // This segment has at least one document with the _doc_count field. - return null; + return false; } TermsEnum indexTermsEnum = segmentTerms.iterator(); @@ -217,31 +213,28 @@ LeafBucketCollector termDocFreqCollector( ordinalTerm = globalOrdinalTermsEnum.next(); } } - return new LeafBucketCollector() { - @Override - public void collect(int doc, long owningBucketOrd) throws IOException { - throw new CollectionTerminatedException(); - } - }; + return true; } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); - collectionStrategy.globalOrdsReady(globalOrds); - if (collectionStrategy instanceof DenseGlobalOrds && this.resultStrategy instanceof StandardTermsResults - && sub == LeafBucketCollector.NO_OP_COLLECTOR) { - LeafBucketCollector termDocFreqCollector = termDocFreqCollector( + && subAggregators.length == 0) { + return tryCollectFromTermFrequencies( ctx, globalOrds, (ord, docCount) -> incrementBucketDocCount(collectionStrategy.globalOrdToBucketOrd(0, ord), docCount) ); - if (termDocFreqCollector != null) { - return termDocFreqCollector; - } } + return false; + } + + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + SortedSetDocValues globalOrds = valuesSource.globalOrdinalsValues(ctx); + collectionStrategy.globalOrdsReady(globalOrds); SortedDocValues singleValues = DocValues.unwrapSingleton(globalOrds); if (singleValues != null) { @@ -436,6 +429,24 @@ static class LowCardinality extends GlobalOrdinalsStringTermsAggregator { this.segmentDocCounts = context.bigArrays().newLongArray(1, true); } + @Override + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + if (subAggregators.length == 0) { + if (mapping != null) { + mapSegmentCountsToGlobalCounts(mapping); + } + final SortedSetDocValues segmentOrds = 
valuesSource.ordinalsValues(ctx); + segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount()); + mapping = valuesSource.globalOrdinalsMapping(ctx); + return tryCollectFromTermFrequencies( + ctx, + segmentOrds, + (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) + ); + } + return false; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { if (mapping != null) { @@ -446,17 +457,6 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCol assert sub == LeafBucketCollector.NO_OP_COLLECTOR; mapping = valuesSource.globalOrdinalsMapping(ctx); - if (this.resultStrategy instanceof StandardTermsResults) { - LeafBucketCollector termDocFreqCollector = this.termDocFreqCollector( - ctx, - segmentOrds, - (ord, docCount) -> incrementBucketDocCount(mapping.applyAsLong(ord), docCount) - ); - if (termDocFreqCollector != null) { - return termDocFreqCollector; - } - } - final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds); if (singleValues != null) { segmentsWithSingleValuedOrds++; diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java index f71b6679a7c4d..5f99a9cc05558 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/AvgAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.FixedBitSet; @@ -104,23 +103,29 @@ public ScoreMode scoreMode() { } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; + return false; } CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators - return LeafBucketCollector.NO_OP_COLLECTOR; + return true; } - return getStarTreeLeafCollector(ctx, sub, supportedStarTree); + precomputeLeafUsingStarTree(ctx, supportedStarTree); + return true; } - return getDefaultLeafCollector(ctx, sub); + return false; } - private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); final CompensatedSum kahanSummation = new CompensatedSum(0, 0); @@ -154,8 +159,7 @@ public void collect(int doc, long bucket) throws IOException { }; } - public LeafBucketCollector getStarTreeLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) - throws IOException { + private void precomputeLeafUsingStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { StarTreeValues starTreeValues = StarTreeQueryHelper.getStarTreeValues(ctx, starTree); assert starTreeValues != null; @@ -200,12 +204,6 @@ public LeafBucketCollector getStarTreeLeafCollector(LeafReaderContext ctx, LeafB sums.set(0, kahanSummation.value()); compensations.set(0, kahanSummation.delta()); - return new LeafBucketCollectorBase(sub, valuesSource.doubleValues(ctx)) { - @Override - public void collect(int doc, long bucket) { - throw new CollectionTerminatedException(); - } - }; } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java index c64a6cf29fb63..8a2c8a6de923f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MaxAggregator.java @@ -104,6 +104,24 @@ public ScoreMode scoreMode() { return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } + @Override + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + if (valuesSource == null) { + return false; + } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + if (parent != null && subAggregators.length == 0) { + // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
+ // Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators + return true; + } + precomputeLeafUsingStarTree(ctx, supportedStarTree); + return true; + } + return false; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { @@ -130,20 +148,6 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc } } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); - if (supportedStarTree != null) { - if (parent != null && subAggregators.length == 0) { - // If this a child aggregator, then the parent will trigger star-tree pre-computation. - // Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators - return LeafBucketCollector.NO_OP_COLLECTOR; - } - getStarTreeCollector(ctx, sub, supportedStarTree); - } - return getDefaultLeafCollector(ctx, sub); - } - - private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { - final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MAX.select(allValues); @@ -167,9 +171,9 @@ public void collect(int doc, long bucket) throws IOException { }; } - public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { + private void precomputeLeafUsingStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { AtomicReference max = new AtomicReference<>(maxes.get(0)); - StarTreeQueryHelper.getStarTreeLeafCollector(context, valuesSource, ctx, sub, starTree, MetricStat.MAX.getTypeName(), value -> { + StarTreeQueryHelper.precomputeLeafUsingStarTree(context, valuesSource, ctx, starTree, MetricStat.MAX.getTypeName(), value -> { max.set(Math.max(max.get(), (NumericUtils.sortableLongToDouble(value)))); }, () -> maxes.set(0, max.get())); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java index 5cdee536cde19..84dda7928aa90 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/MinAggregator.java @@ -104,6 +104,25 @@ public ScoreMode scoreMode() { return valuesSource != null && valuesSource.needsScores() ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; } + @Override + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { + if (valuesSource == null) { + return false; + } + CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); + if (supportedStarTree != null) { + if (parent != null && subAggregators.length == 0) { + // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
+ // Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators + return true; + } + precomputeLeafUsingStarTree(ctx, supportedStarTree); + return true; + } + + return false; + } + @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { @@ -129,19 +148,6 @@ public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc } } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); - if (supportedStarTree != null) { - if (parent != null && subAggregators.length == 0) { - // If this a child aggregator, then the parent will trigger star-tree pre-computation. - // Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators - return LeafBucketCollector.NO_OP_COLLECTOR; - } - getStarTreeCollector(ctx, sub, supportedStarTree); - } - return getDefaultLeafCollector(ctx, sub); - } - - private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MIN.select(allValues); @@ -164,9 +170,9 @@ public void collect(int doc, long bucket) throws IOException { }; } - public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { + private void precomputeLeafUsingStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { AtomicReference min = new AtomicReference<>(mins.get(0)); - StarTreeQueryHelper.getStarTreeLeafCollector(context, valuesSource, ctx, sub, starTree, MetricStat.MIN.getTypeName(), value -> { + StarTreeQueryHelper.precomputeLeafUsingStarTree(context, valuesSource, ctx, starTree, MetricStat.MIN.getTypeName(), value -> { min.set(Math.min(min.get(), (NumericUtils.sortableLongToDouble(value)))); }, () -> mins.set(0, min.get())); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java index edcfb61263fc1..ba32592f75ea1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/SumAggregator.java @@ -93,24 +93,29 @@ public ScoreMode scoreMode() { } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; + return false; } - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators - return LeafBucketCollector.NO_OP_COLLECTOR; + return true; } - getStarTreeCollector(ctx, sub, supportedStarTree); + precomputeLeafUsingStarTree(ctx, supportedStarTree); + return true; } - return getDefaultLeafCollector(ctx, sub); + return false; } - private LeafBucketCollector getDefaultLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException { + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { + + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues values = valuesSource.doubleValues(ctx); final CompensatedSum kahanSummation = new CompensatedSum(0, 0); @@ -140,14 +145,13 @@ public void collect(int doc, long bucket) throws IOException { }; } - public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { + private void precomputeLeafUsingStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { final CompensatedSum kahanSummation = new CompensatedSum(sums.get(0), compensations.get(0)); - StarTreeQueryHelper.getStarTreeLeafCollector( + StarTreeQueryHelper.precomputeLeafUsingStarTree( context, valuesSource, ctx, - sub, starTree, MetricStat.SUM.getTypeName(), value -> kahanSummation.add(NumericUtils.sortableLongToDouble(value)), diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java index d298361391ad9..3541753d94e6f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/ValueCountAggregator.java @@ -88,24 +88,30 @@ public ValueCountAggregator( } @Override - public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { - if (valuesSource == null) { - return LeafBucketCollector.NO_OP_COLLECTOR; - } - final BigArrays bigArrays = context.bigArrays(); - + protected boolean tryPrecomputeAggregationForLeaf(LeafReaderContext ctx) throws IOException { if (valuesSource instanceof ValuesSource.Numeric) { - CompositeIndexFieldInfo supportedStarTree = getSupportedStarTree(this.context.getQueryShardContext()); if (supportedStarTree != null) { if (parent != null && subAggregators.length == 0) { // If this a child aggregator, then the parent will trigger star-tree pre-computation. 
// Returning NO_OP_COLLECTOR explicitly because the getLeafCollector() are invoked starting from innermost aggregators - return LeafBucketCollector.NO_OP_COLLECTOR; + return true; } - getStarTreeCollector(ctx, sub, supportedStarTree); + precomputeLeafUsingStarTree(ctx, supportedStarTree); + return true; } + } + return false; + } + @Override + public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { + if (valuesSource == null) { + return LeafBucketCollector.NO_OP_COLLECTOR; + } + final BigArrays bigArrays = context.bigArrays(); + + if (valuesSource instanceof ValuesSource.Numeric) { final SortedNumericDocValues values = ((ValuesSource.Numeric) valuesSource).longValues(ctx); return new LeafBucketCollectorBase(sub, values) { @@ -145,12 +151,11 @@ public void collect(int doc, long bucket) throws IOException { }; } - public void getStarTreeCollector(LeafReaderContext ctx, LeafBucketCollector sub, CompositeIndexFieldInfo starTree) throws IOException { - StarTreeQueryHelper.getStarTreeLeafCollector( + private void precomputeLeafUsingStarTree(LeafReaderContext ctx, CompositeIndexFieldInfo starTree) throws IOException { + StarTreeQueryHelper.precomputeLeafUsingStarTree( context, (ValuesSource.Numeric) valuesSource, ctx, - sub, starTree, MetricStat.VALUE_COUNT.getTypeName(), value -> counts.increment(0, value), diff --git a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java index edbccb53853d5..0e3bc220461b9 100644 --- a/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/search/startree/StarTreeQueryHelper.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SegmentReader; -import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.util.FixedBitSet; import org.opensearch.common.lucene.Lucene; @@ -21,7 +20,6 @@ import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils; import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.query.QueryShardContext; -import org.opensearch.search.aggregations.LeafBucketCollector; import org.opensearch.search.aggregations.StarTreeBucketCollector; import org.opensearch.search.aggregations.support.ValuesSource; import org.opensearch.search.internal.SearchContext; @@ -70,11 +68,10 @@ public static StarTreeValues getStarTreeValues(LeafReaderContext context, Compos * Get the star-tree leaf collector * This collector computes the aggregation prematurely and invokes an early termination collector */ - public static void getStarTreeLeafCollector( + public static void precomputeLeafUsingStarTree( SearchContext context, ValuesSource.Numeric valuesSource, LeafReaderContext ctx, - LeafBucketCollector sub, CompositeIndexFieldInfo starTree, String metric, Consumer valueConsumer, @@ -112,10 +109,6 @@ public static void getStarTreeLeafCollector( // Call the final consumer after processing all entries finalConsumer.run(); - - // FIXME : Remove after @msfroh PR for precompute - // Terminate after pre-computing aggregation - throw new CollectionTerminatedException(); } /** From d601c37b5757155bba807b2a826c9f03b4849564 Mon Sep 17 00:00:00 2001 From: Tommy Shao <69884021+anntians@users.noreply.github.com> Date: Thu, 30 Jan 
2025 11:19:25 -0800 Subject: [PATCH 38/48] Register UnmodifiableOnRestore settings (#17121) * Register UnmodifiableOnRestore settings Signed-off-by: AnnTian Shao * fixes to tests Signed-off-by: AnnTian Shao * fix typo Signed-off-by: AnnTian Shao * fix javadoc Signed-off-by: AnnTian Shao --------- Signed-off-by: AnnTian Shao Co-authored-by: AnnTian Shao --- .../opensearch/client/IndicesClientIT.java | 39 +++++++++++++++++++ .../admin/indices/create/CreateIndexIT.java | 22 +++++------ .../cluster/metadata/IndexMetadata.java | 25 ++++++++++++ .../common/settings/IndexScopedSettings.java | 6 +-- .../opensearch/snapshots/RestoreService.java | 11 +----- .../opensearch/index/IndexSettingsTests.java | 9 +++-- .../test/InternalSettingsPlugin.java | 9 ----- 7 files changed, 83 insertions(+), 38 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index 0399e4667d85d..75022820c7fb8 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -134,6 +134,7 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.support.XContentMapValues.extractRawValues; @@ -256,6 +257,26 @@ public void testCreateIndex() throws IOException { } } + public void testCreateIndexFailPrivateSetting() throws IOException { + { + // Create index with private setting + String indexName = "private_index"; + assertFalse(indexExists(indexName)); + + CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); + + Settings.Builder settings = Settings.builder(); + settings.put(SETTING_CREATION_DATE, -1); + createIndexRequest.settings(settings); + + OpenSearchStatusException exception = expectThrows( + OpenSearchStatusException.class, + () -> execute(createIndexRequest, highLevelClient().indices()::create, highLevelClient().indices()::createAsync) + ); + assertTrue(exception.getMessage().contains("private index setting [index.creation_date] can not be set explicitly")); + } + } + public void testGetSettings() throws IOException { String indexName = "get_settings_index"; Settings basicSettings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); @@ -281,6 +302,24 @@ public void testGetSettings() throws IOException { assertEquals("30s", updatedResponse.getSetting(indexName, "index.refresh_interval")); } + public void testGetPrivateSettings() throws IOException { + String indexName = "get_settings_index"; + Settings basicSettings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); + + createIndex(indexName, basicSettings); + + GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(indexName); + GetSettingsResponse getSettingsResponse = execute( + getSettingsRequest, + highLevelClient().indices()::getSettings, + highLevelClient().indices()::getSettingsAsync + ); + + assertNull(getSettingsResponse.getSetting(indexName, "index.refresh_interval")); + assertNotNull(getSettingsResponse.getSetting(indexName, "index.creation_date")); + assertNotNull(getSettingsResponse.getSetting(indexName, 
"index.uuid")); + } + public void testGetSettingsNonExistentIndex() throws IOException { String nonExistentIndex = "index_that_doesnt_exist"; assertFalse(indexExists(nonExistentIndex)); diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index d713c9cc86841..b41b3f07d3ac1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -90,19 +90,6 @@ @ClusterScope(scope = Scope.TEST) public class CreateIndexIT extends OpenSearchIntegTestCase { - public void testCreationDateGivenFails() { - try { - prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_CREATION_DATE, 4L)).get(); - fail(); - } catch (SettingsException ex) { - assertEquals( - "unknown setting [index.creation_date] please check that any required plugins are installed, or check the " - + "breaking changes documentation for removed settings", - ex.getMessage() - ); - } - } - public void testCreationDateGenerated() { long timeBeforeRequest = System.currentTimeMillis(); prepareCreate("test").get(); @@ -224,6 +211,15 @@ public void testUnknownSettingFails() { } } + public void testPrivateSettingFails() { + try { + prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_CREATION_DATE, -1).build()).get(); + fail("should have thrown an exception about private settings"); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("private index setting [index.creation_date] can not be set explicitly")); + } + } + public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { int value = randomIntBetween(-10, 0); try { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index 4e605dce498ab..d50192f106cfe 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -588,6 +588,15 @@ public static APIBlock readFrom(StreamInput input) throws IOException { public static final String SETTING_VERSION_UPGRADED_STRING = "index.version.upgraded_string"; public static final String SETTING_CREATION_DATE = "index.creation_date"; + public static final Setting SETTING_INDEX_CREATION_DATE = Setting.longSetting( + SETTING_CREATION_DATE, + -1, + -1, + Property.IndexScope, + Property.PrivateIndex, + Property.UnmodifiableOnRestore + ); + /** * The user provided name for an index. This is the plain string provided by the user when the index was created. * It might still contain date math expressions etc. 
(added in 5.0) @@ -614,6 +623,22 @@ public static APIBlock readFrom(StreamInput input) throws IOException { public static final String INDEX_UUID_NA_VALUE = Strings.UNKNOWN_UUID_VALUE; + public static final Setting INDEX_UUID_SETTING = Setting.simpleString( + SETTING_INDEX_UUID, + INDEX_UUID_NA_VALUE, + Property.IndexScope, + Property.PrivateIndex, + Property.UnmodifiableOnRestore + ); + + public static final Setting SETTING_INDEX_HISTORY_UUID = Setting.simpleString( + SETTING_HISTORY_UUID, + INDEX_UUID_NA_VALUE, + Property.IndexScope, + Property.PrivateIndex, + Property.UnmodifiableOnRestore + ); + public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require"; public static final String INDEX_ROUTING_INCLUDE_GROUP_PREFIX = "index.routing.allocation.include"; public static final String INDEX_ROUTING_EXCLUDE_GROUP_PREFIX = "index.routing.allocation.exclude"; diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index b8ace8495ad96..0e21104fb6426 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -91,6 +91,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, IndexMetadata.SETTING_INDEX_VERSION_CREATED, + IndexMetadata.SETTING_INDEX_CREATION_DATE, + IndexMetadata.INDEX_UUID_SETTING, + IndexMetadata.SETTING_INDEX_HISTORY_UUID, IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING, IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING, IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING, @@ -316,9 +319,6 @@ protected void validateSettingKey(Setting setting) { @Override public boolean isPrivateSetting(String key) { switch (key) { - case IndexMetadata.SETTING_CREATION_DATE: - case IndexMetadata.SETTING_INDEX_UUID: - case IndexMetadata.SETTING_HISTORY_UUID: case IndexMetadata.SETTING_VERSION_UPGRADED: case IndexMetadata.SETTING_INDEX_PROVIDED_NAME: case MergePolicyProvider.INDEX_MERGE_ENABLED: diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 29ced9d5f0f0c..89403b15f6aca 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -118,9 +118,7 @@ import static java.util.Collections.unmodifiableSet; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_HISTORY_UUID; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; @@ -162,14 +160,7 @@ public class RestoreService implements ClusterStateApplier { private static final Logger logger = LogManager.getLogger(RestoreService.class); private static final Set USER_UNMODIFIABLE_SETTINGS = unmodifiableSet( - newHashSet( - SETTING_INDEX_UUID, - SETTING_CREATION_DATE, - SETTING_HISTORY_UUID, - 
SETTING_REMOTE_STORE_ENABLED, - SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, - SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY - ) + newHashSet(SETTING_REMOTE_STORE_ENABLED, SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY) ); // It's OK to change some settings, but we shouldn't allow simply removing them diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 474ec73d5fe61..bc505daa607c1 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -607,8 +607,11 @@ public void testTranslogGenerationSizeThreshold() { assertEquals(actual, settings.getGenerationThresholdSize()); } + /** + * Test private setting validation for private settings defined in {@link IndexScopedSettings#isPrivateSetting(String)} + */ public void testPrivateSettingsValidation() { - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_CREATION_DATE, System.currentTimeMillis()).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_UPGRADED, Version.V_EMPTY).build(); final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); { @@ -617,7 +620,7 @@ public void testPrivateSettingsValidation() { SettingsException.class, () -> indexScopedSettings.validate(settings, randomBoolean()) ); - assertThat(e, hasToString(containsString("unknown setting [index.creation_date]"))); + assertThat(e, hasToString(containsString("unknown setting [index.version.upgraded]"))); } { @@ -626,7 +629,7 @@ public void testPrivateSettingsValidation() { SettingsException.class, () -> indexScopedSettings.validate(settings, randomBoolean(), false, randomBoolean()) ); - assertThat(e, hasToString(containsString("unknown setting [index.creation_date]"))); + assertThat(e, hasToString(containsString("unknown setting [index.version.upgraded]"))); } // nothing should happen since we are ignoring private settings diff --git a/test/framework/src/main/java/org/opensearch/test/InternalSettingsPlugin.java b/test/framework/src/main/java/org/opensearch/test/InternalSettingsPlugin.java index 986cfd9c5b613..96919f65f88fc 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalSettingsPlugin.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalSettingsPlugin.java @@ -31,7 +31,6 @@ package org.opensearch.test; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; @@ -59,13 +58,6 @@ public final class InternalSettingsPlugin extends Plugin { Property.IndexScope, Property.NodeScope ); - public static final Setting INDEX_CREATION_DATE_SETTING = Setting.longSetting( - IndexMetadata.SETTING_CREATION_DATE, - -1, - -1, - Property.IndexScope, - Property.NodeScope - ); public static final Setting TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING = Setting.timeSetting( "index.translog.retention.check_interval", new TimeValue(10, TimeUnit.MINUTES), @@ -78,7 +70,6 @@ public final class InternalSettingsPlugin extends Plugin { public List> getSettings() { return Arrays.asList( MERGE_ENABLED, - INDEX_CREATION_DATE_SETTING, PROVIDED_NAME_SETTING, TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING, RemoteConnectionStrategy.REMOTE_MAX_PENDING_CONNECTION_LISTENERS, From 
20a58eba1d11017a846301b7d04d20158f2b6122 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 30 Jan 2025 11:51:58 -0800 Subject: [PATCH 39/48] Replace usage of transitive shaded dependency (#17194) Signed-off-by: Andrew Ross --- .../src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java index 39af56ea04ed7..543a5ecfc5497 100644 --- a/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java +++ b/plugins/ingestion-kafka/src/test/java/org/opensearch/plugin/kafka/KafkaUtils.java @@ -24,8 +24,6 @@ import java.util.concurrent.TimeUnit; import java.util.function.Function; -import org.testcontainers.shaded.com.google.common.collect.ImmutableMap; - import static org.awaitility.Awaitility.await; public class KafkaUtils { @@ -76,7 +74,7 @@ public static boolean checkTopicExistence(String topicName, String bootstrapServ private static Rep getAdminClient(String bootstrapServer, Function function) { AdminClient adminClient = KafkaAdminClient.create( - ImmutableMap.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer, AdminClientConfig.CLIENT_ID_CONFIG, "test") + Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer, AdminClientConfig.CLIENT_ID_CONFIG, "test") ); try { return function.apply(adminClient); From 1bf8b9c3d01417f2877b803ddf8b45432408e662 Mon Sep 17 00:00:00 2001 From: Eugene Tolbakov Date: Thu, 30 Jan 2025 19:57:11 +0000 Subject: [PATCH 40/48] Add highlighting for match_only_text type (#17101) --------- Signed-off-by: Eugene Tolbakov --- CHANGELOG.md | 1 + .../11_match_field_match_only_text.yml | 79 ++++++++++++++++++- ...ligthing.yml => 30_ngram_highlighting.yml} | 0 ...am_highlighting_field_match_only_text.yml} | 0 .../subphase/highlight/HighlightPhase.java | 7 +- 5 files changed, 84 insertions(+), 3 deletions(-) rename modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/{30_ngram_highligthing.yml => 30_ngram_highlighting.yml} (100%) rename modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/{30_ngram_highligthing_field_match_only_text.yml => 30_ngram_highlighting_field_match_only_text.yml} (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 242a10d4ae6da..5252a3c955458 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,6 +120,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix exists queries on nested flat_object fields throws exception ([#16803](https://github.com/opensearch-project/OpenSearch/pull/16803)) - Use OpenSearch version to deserialize remote custom metadata([#16494](https://github.com/opensearch-project/OpenSearch/pull/16494)) - Fix AutoDateHistogramAggregator rounding assertion failure ([#17023](https://github.com/opensearch-project/OpenSearch/pull/17023)) +- Add highlighting for wildcard search on `match_only_text` field ([#17101](https://github.com/opensearch-project/OpenSearch/pull/17101)) - Fix the failing CI's with `Failed to load eclipse jdt formatter` error ([#17172](https://github.com/opensearch-project/OpenSearch/pull/17172)) ### Security diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml 
b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml index 140d70414a4a7..b891c5496ac65 100644 --- a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml +++ b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/11_match_field_match_only_text.yml @@ -1,5 +1,5 @@ # integration tests for queries with specific analysis chains - +--- "match query with stacked stems": - skip: version: " - 2.11.99" @@ -68,3 +68,80 @@ query: fox runs operator: AND - match: {hits.total: 2} + +--- +"wildcard highlighting on match_only_text": + - skip: + version: " - 2.99.99" + reason: "wildcard highlighting on match_only_text type was added in 2.19" + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + number_of_replicas: 1 + analysis: + analyzer: + index: + tokenizer: standard + filter: [lowercase] + search: + rest_total_hits_as_int: true + tokenizer: standard + filter: [lowercase, keyword_repeat, porter_stem, unique_stem] + filter: + unique_stem: + type: unique + only_on_same_position: true + mappings: + properties: + text: + type: match_only_text + analyzer: index + search_analyzer: search + + - do: + index: + index: test + id: 1 + body: { "text": "the fox runs across the street" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + highlight: + fields: + - text: {} + - match: {hits.total: 1} + - match: {hits.hits.0.highlight.text.0: "the fox runs across the street"} + + - do: + index: + index: test + id: 2 + body: { "text": "run fox run" } + refresh: true + + - do: + search: + rest_total_hits_as_int: true + body: + query: + match: + text: + query: fox runs + operator: AND + highlight: + fields: + - text: {} + - match: {hits.total: 2} + - match: {hits.hits.0.highlight.text.0: "the fox runs across the street"} + - match: {hits.hits.1.highlight.text.0: "run fox run"} diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highlighting.yml similarity index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highlighting.yml diff --git a/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml b/modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highlighting_field_match_only_text.yml similarity index 100% rename from modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highligthing_field_match_only_text.yml rename to modules/analysis-common/src/yamlRestTest/resources/rest-api-spec/test/search.query/30_ngram_highlighting_field_match_only_text.yml diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightPhase.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightPhase.java index 41a7e9934fc4d..22b6948ab4ec1 100644 --- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightPhase.java @@ 
-37,6 +37,7 @@ import org.opensearch.common.regex.Regex; import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.MappedFieldType; +import org.opensearch.index.mapper.MatchOnlyTextFieldMapper; import org.opensearch.index.mapper.SourceFieldMapper; import org.opensearch.index.mapper.TextFieldMapper; import org.opensearch.search.fetch.FetchContext; @@ -152,7 +153,8 @@ private Map> contextBuilders continue; } - // We should prevent highlighting if a field is anything but a text or keyword field. + // We should prevent highlighting if a field is anything but a text, match_only_text + // or keyword field. // However, someone might implement a custom field type that has text and still want to // highlight on that. We cannot know in advance if the highlighter will be able to // highlight such a field and so we do the following: @@ -162,7 +164,8 @@ private Map> contextBuilders // what they were doing and try to highlight anyway. if (fieldNameContainsWildcards) { if (fieldType.typeName().equals(TextFieldMapper.CONTENT_TYPE) == false - && fieldType.typeName().equals(KeywordFieldMapper.CONTENT_TYPE) == false) { + && fieldType.typeName().equals(KeywordFieldMapper.CONTENT_TYPE) == false + && fieldType.typeName().equals(MatchOnlyTextFieldMapper.CONTENT_TYPE) == false) { continue; } if (highlighter.canHighlight(fieldType) == false) { From e7178f292baeb944f6aeb9be9937573e566452bc Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 30 Jan 2025 14:54:27 -0800 Subject: [PATCH 41/48] Remove DEBUG log level from test cluster node (#17195) The linked issue was closed in 2019 (pre-fork) so I'm going to remove these log levels. If anyone complains about missing logs in test environments we can add them again. Signed-off-by: Andrew Ross --- .../org/opensearch/gradle/testclusters/OpenSearchNode.java | 4 ---- 1 file changed, 4 deletions(-) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java index cd22560af9a96..aaa2daef2a158 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/OpenSearchNode.java @@ -1188,10 +1188,6 @@ private void createConfiguration() { // Don't wait for state, just start up quickly. 
This will also allow new and old nodes in the BWC case to become the master baseConfig.put("discovery.initial_state_timeout", "0s"); - // TODO: Remove these once https://github.com/elastic/elasticsearch/issues/46091 is fixed - baseConfig.put("logger.org.opensearch.action.support.master", "DEBUG"); - baseConfig.put("logger.org.opensearch.cluster.coordination", "DEBUG"); - HashSet overriden = new HashSet<>(baseConfig.keySet()); overriden.retainAll(settings.keySet()); OVERRIDABLE_SETTINGS.forEach(overriden::remove); From 6d45e0d1add8f87ec8dc27288f5ae397d3e67aaa Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Fri, 31 Jan 2025 10:32:50 -0500 Subject: [PATCH 42/48] Update dependabot_pr workflow to use jdk 21 (#17217) Signed-off-by: Craig Perkins --- .github/workflows/dependabot_pr.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dependabot_pr.yml b/.github/workflows/dependabot_pr.yml index 25abd99cadb96..71ca74050ccfb 100644 --- a/.github/workflows/dependabot_pr.yml +++ b/.github/workflows/dependabot_pr.yml @@ -23,10 +23,10 @@ jobs: token: ${{ steps.github_app_token.outputs.token }} # See please https://docs.gradle.org/8.10/userguide/upgrading_version_8.html#minimum_daemon_jvm_version - - name: Set up JDK 17 + - name: Set up JDK 21 uses: actions/setup-java@v4 with: - java-version: 17 + java-version: 21 distribution: temurin - name: Update Gradle SHAs From 75492742debf0b9ff96690ec7232b8adb1118fb9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:41:48 -0500 Subject: [PATCH 43/48] Bump ch.qos.logback:logback-classic from 1.5.15 to 1.5.16 in /test/fixtures/hdfs-fixture (#17133) * Bump ch.qos.logback:logback-classic in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-classic](https://github.com/qos-ch/logback) from 1.5.15 to 1.5.16. - [Commits](https://github.com/qos-ch/logback/compare/v_1.5.15...v_1.5.16) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-classic dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5252a3c955458..e2e6ef0307ba5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,7 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `codecov/codecov-action` from 4 to 5 ([#16667](https://github.com/opensearch-project/OpenSearch/pull/16667)) - Bump `org.apache.logging.log4j:log4j-core` from 2.24.1 to 2.24.3 ([#16718](https://github.com/opensearch-project/OpenSearch/pull/16718), [#16858](https://github.com/opensearch-project/OpenSearch/pull/16858)) - Bump `jackson` from 2.17.2 to 2.18.2 ([#16733](https://github.com/opensearch-project/OpenSearch/pull/16733)) -- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.15 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898)) +- Bump `ch.qos.logback:logback-classic` from 1.2.13 to 1.5.16 ([#16716](https://github.com/opensearch-project/OpenSearch/pull/16716), [#16898](https://github.com/opensearch-project/OpenSearch/pull/16898), [#17133](https://github.com/opensearch-project/OpenSearch/pull/17133)) - Bump `com.azure:azure-identity` from 1.13.2 to 1.14.2 ([#16778](https://github.com/opensearch-project/OpenSearch/pull/16778)) - Bump Apache Lucene from 9.12.0 to 9.12.1 ([#16846](https://github.com/opensearch-project/OpenSearch/pull/16846)) - Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 49a728586b2fa..ea26d24c862b0 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -75,7 +75,7 @@ dependencies { api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" api "ch.qos.logback:logback-core:1.5.16" - api "ch.qos.logback:logback-classic:1.5.15" + api "ch.qos.logback:logback-classic:1.5.16" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.28.0' api 'org.apache.commons:commons-configuration2:2.11.0' From 0720fccd789559dec0e3ea50ab0c44cf423f62c0 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 31 Jan 2025 13:24:10 -0500 Subject: [PATCH 44/48] Update 2.x to 2.20.0 (#17220) Signed-off-by: Andriy Redko --- .ci/bwcVersions | 3 ++- libs/core/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index d1b4e4c509cb9..73b14fa56190e 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -42,4 +42,5 @@ BWC_VERSION: - "2.17.2" - "2.18.0" - "2.18.1" - - "2.19.0" \ No newline at end of file + - "2.19.0" + - "2.20.0" \ No newline at end of file diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java index 9db30ed47cb50..339a162bb0a33 100644 --- a/libs/core/src/main/java/org/opensearch/Version.java +++ b/libs/core/src/main/java/org/opensearch/Version.java @@ -114,6 +114,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_18_0 = new Version(2180099, org.apache.lucene.util.Version.LUCENE_9_12_0); public static 
final Version V_2_18_1 = new Version(2180199, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_2_19_0 = new Version(2190099, org.apache.lucene.util.Version.LUCENE_9_12_1); + public static final Version V_2_20_0 = new Version(2200099, org.apache.lucene.util.Version.LUCENE_9_12_1); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_10_1_0); public static final Version CURRENT = V_3_0_0; From 6fb0c1b3456bdb83fa267cd2eac0955a60da36a4 Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Fri, 31 Jan 2025 13:38:48 -0800 Subject: [PATCH 45/48] More removal of deprecated 'master' code (#17218) Signed-off-by: Andrew Ross --- .../opensearch/client/RequestConverters.java | 8 -- .../org/opensearch/client/TimedRequest.java | 22 --- .../indices/GetComponentTemplatesRequest.java | 21 --- .../GetComposableIndexTemplateRequest.java | 21 --- .../indices/GetIndexTemplatesRequest.java | 21 --- .../main/java/org/opensearch/client/Node.java | 9 -- .../opensearch/snapshots/RepositoriesIT.java | 2 +- .../cluster/health/ClusterHealthResponse.java | 6 - .../TransportClusterManagerNodeAction.java | 38 +---- .../info/TransportClusterInfoAction.java | 22 +-- .../cluster/ClusterChangedEvent.java | 10 -- .../cluster/ClusterStateTaskExecutor.java | 10 -- .../cluster/ClusterStateTaskListener.java | 11 -- .../action/index/MappingUpdatedAction.java | 13 -- .../coordination/JoinTaskExecutor.java | 23 +-- .../NoClusterManagerBlockService.java | 19 --- .../cluster/coordination/PeersResponse.java | 9 -- .../cluster/health/ClusterStateHealth.java | 6 - .../MetadataIndexTemplateService.java | 20 --- .../cluster/node/DiscoveryNode.java | 16 --- .../cluster/node/DiscoveryNodes.java | 96 ------------- .../routing/DelayedAllocationService.java | 7 - .../index/seqno/ReplicationTracker.java | 16 --- .../cluster/RestRestoreRemoteStoreAction.java | 4 +- .../RestoreRemoteStoreRequestTests.java | 4 +- ...ransportClusterManagerNodeActionTests.java | 8 +- .../coordination/JoinTaskExecutorTests.java | 2 - .../cluster/coordination/NodeJoinTests.java | 4 +- .../service/ClusterApplierServiceTests.java | 4 +- .../service/ClusterManagerServiceTests.java | 8 +- .../ClusterManagerTaskThrottlerTests.java | 5 - .../indices/cluster/ClusterStateChanges.java | 6 - .../AbstractSnapshotIntegTestCase.java | 30 ---- .../opensearch/test/InternalTestCluster.java | 131 ------------------ .../java/org/opensearch/test/NodeRoles.java | 36 ----- .../java/org/opensearch/test/TestCluster.java | 14 +- .../test/rest/yaml/ClientYamlTestClient.java | 6 - .../yaml/ClientYamlTestExecutionContext.java | 7 - 38 files changed, 26 insertions(+), 669 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 3546776fa3617..25242b5ef798e 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -946,14 +946,6 @@ Params withFields(String[] fields) { return this; } - /** - * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #withClusterManagerTimeout(TimeValue)} - */ - @Deprecated - Params withMasterTimeout(TimeValue clusterManagerTimeout) { - return putParam("master_timeout", clusterManagerTimeout); - } - Params withClusterManagerTimeout(TimeValue clusterManagerTimeout) { return putParam("cluster_manager_timeout", 
clusterManagerTimeout); } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java index d40445b2daa81..77c004341c1c3 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TimedRequest.java @@ -45,9 +45,6 @@ public abstract class TimedRequest implements Validatable { public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); public static final TimeValue DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT} */ - @Deprecated - public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; private TimeValue timeout = DEFAULT_ACK_TIMEOUT; private TimeValue clusterManagerTimeout = DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; @@ -68,16 +65,6 @@ public void setClusterManagerTimeout(TimeValue clusterManagerTimeout) { this.clusterManagerTimeout = clusterManagerTimeout; } - /** - * Sets the timeout to connect to the cluster-manager node - * @param clusterManagerTimeout timeout as a {@link TimeValue} - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerTimeout(TimeValue)} - */ - @Deprecated - public void setMasterTimeout(TimeValue clusterManagerTimeout) { - setClusterManagerTimeout(clusterManagerTimeout); - } - /** * Returns the request timeout */ @@ -91,13 +78,4 @@ public TimeValue timeout() { public TimeValue clusterManagerNodeTimeout() { return clusterManagerTimeout; } - - /** - * Returns the timeout for the request to be completed on the cluster-manager node - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeTimeout()} - */ - @Deprecated - public TimeValue masterNodeTimeout() { - return clusterManagerNodeTimeout(); - } } diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java index 6326e8edf763b..c4117bbee677d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComponentTemplatesRequest.java @@ -71,25 +71,10 @@ public TimeValue getClusterManagerNodeTimeout() { return clusterManagerNodeTimeout; } - /** - * @return the timeout for waiting for the cluster-manager node to respond - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getMasterNodeTimeout()} - */ - @Deprecated - public TimeValue getMasterNodeTimeout() { - return getClusterManagerNodeTimeout(); - } - public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */ - @Deprecated - public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { final TimeValue timeValue = TimeValue.parseTimeValue( clusterManagerNodeTimeout, @@ -98,12 +83,6 @@ public 
void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { setClusterManagerNodeTimeout(timeValue); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */ - @Deprecated - public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - /** * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java index 73f6f15fc7a78..20d0b964549bc 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetComposableIndexTemplateRequest.java @@ -71,25 +71,10 @@ public TimeValue getClusterManagerNodeTimeout() { return clusterManagerNodeTimeout; } - /** - * @return the timeout for waiting for the cluster-manager node to respond - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getMasterNodeTimeout()} - */ - @Deprecated - public TimeValue getMasterNodeTimeout() { - return getClusterManagerNodeTimeout(); - } - public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */ - @Deprecated - public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { final TimeValue timeValue = TimeValue.parseTimeValue( clusterManagerNodeTimeout, @@ -98,12 +83,6 @@ public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { setClusterManagerNodeTimeout(timeValue); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */ - @Deprecated - public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - /** * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java index dc1759a7272e8..4368fd0248b6d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/GetIndexTemplatesRequest.java @@ -90,25 +90,10 @@ public TimeValue getClusterManagerNodeTimeout() { return clusterManagerNodeTimeout; } - /** - * @return the timeout for waiting for the cluster-manager node to respond - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getMasterNodeTimeout()} - */ - @Deprecated - public TimeValue getMasterNodeTimeout() { - return getClusterManagerNodeTimeout(); - } - public void setClusterManagerNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { 
this.clusterManagerNodeTimeout = clusterManagerNodeTimeout; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(TimeValue)} */ - @Deprecated - public void setMasterNodeTimeout(@Nullable TimeValue clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { final TimeValue timeValue = TimeValue.parseTimeValue( clusterManagerNodeTimeout, @@ -117,12 +102,6 @@ public void setClusterManagerNodeTimeout(String clusterManagerNodeTimeout) { setClusterManagerNodeTimeout(timeValue); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #setClusterManagerNodeTimeout(String)} */ - @Deprecated - public void setMasterNodeTimeout(String clusterManagerNodeTimeout) { - setClusterManagerNodeTimeout(clusterManagerNodeTimeout); - } - /** * @return true if this request is to read from the local cluster state, rather than the cluster-manager node - false otherwise */ diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index 8fe5dcfa00db0..022e6476d8482 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -226,15 +226,6 @@ public boolean isClusterManagerEligible() { return roles.contains("master") || roles.contains("cluster_manager"); } - /** - * Returns whether or not the node could be elected cluster-manager. - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerEligible()} - */ - @Deprecated - public boolean isMasterEligible() { - return isClusterManagerEligible(); - } - /** * Returns whether or not the node stores data. 
*/ diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index 35fd716c89e2b..bf69cf13adb52 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -433,7 +433,7 @@ public void testSnapshotShardBlobDeletionRepositoryThrowingError() throws Except createFullSnapshot(repositoryName, secondSnapshot); // Make repository to throw exception when trying to delete stale snapshot shard blobs - String clusterManagerNode = internalCluster().getMasterName(); + String clusterManagerNode = internalCluster().getClusterManagerName(); ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) .setThrowExceptionWhileDelete(true); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index 2dcfb58c3d7b8..f0e897458746c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -367,12 +367,6 @@ public boolean hasDiscoveredClusterManager() { return clusterStateHealth.hasDiscoveredClusterManager(); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #hasDiscoveredClusterManager()} */ - @Deprecated - public boolean hasDiscoveredMaster() { - return hasDiscoveredClusterManager(); - } - public int getNumberOfPendingTasks() { return this.numberOfPendingTasks; } diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 558b7370749d5..8e4d1e33b9a10 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -161,35 +161,16 @@ protected TransportClusterManagerNodeAction( protected abstract Response read(StreamInput in) throws IOException; - /** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} - */ - @Deprecated - protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { - throw new UnsupportedOperationException("Must be overridden"); - } - // TODO: Add abstract keyword after removing the deprecated masterOperation() - protected void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) throws Exception { - masterOperation(request, state, listener); - } + protected abstract void clusterManagerOperation(Request request, ClusterState state, ActionListener listener) + throws Exception; /** * Override this operation if access to the task parameter is needed - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(Task, ClusterManagerNodeRequest, ClusterState, ActionListener)} */ - @Deprecated - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws 
Exception { - clusterManagerOperation(request, state, listener); - } - - /** - * Override this operation if access to the task parameter is needed - */ - // TODO: Change the implementation to call 'clusterManagerOperation(request...)' after removing the deprecated masterOperation() protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { - masterOperation(task, request, state, listener); + clusterManagerOperation(request, state, listener); } protected boolean localExecute(Request request) { @@ -265,7 +246,7 @@ public boolean shouldRetry(Exception e) { */ @Override public Exception getTimeoutException(Exception e) { - return new ProcessClusterEventTimeoutException(request.masterNodeTimeout, actionName); + return new ProcessClusterEventTimeoutException(request.clusterManagerNodeTimeout, actionName); } protected void doStart(ClusterState clusterState) { @@ -551,17 +532,6 @@ protected String getClusterManagerActionName(DiscoveryNode node) { return actionName; } - /** - * Allows to conditionally return a different cluster-manager node action name in the case an action gets renamed. - * This mainly for backwards compatibility should be used rarely - * - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #getClusterManagerActionName(DiscoveryNode)} - */ - @Deprecated - protected String getMasterActionName(DiscoveryNode node) { - return getClusterManagerActionName(node); - } - /** * Override to true if the transport action can be executed locally and need NOT be executed always on cluster-manager (Read actions). * The action is executed locally if this method returns true AND diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index 8a0082ad05f66..883d2e7429e2d 100644 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -77,34 +77,16 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) .indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request)); } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOperation(ClusterInfoRequest, ClusterState, ActionListener)} */ - @Deprecated - protected final void masterOperation(final Request request, final ClusterState state, final ActionListener listener) { - clusterManagerOperation(request, state, listener); - } - @Override protected final void clusterManagerOperation(final Request request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request); doClusterManagerOperation(request, concreteIndices, state, listener); } - // TODO: Add abstract keyword after removing the deprecated doMasterOperation() - protected void doClusterManagerOperation( + protected abstract void doClusterManagerOperation( Request request, String[] concreteIndices, ClusterState state, ActionListener listener - ) { - doMasterOperation(request, concreteIndices, state, listener); - } - - /** - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #doClusterManagerOperation(ClusterInfoRequest, 
String[], ClusterState, ActionListener)} - */ - @Deprecated - protected void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, ActionListener listener) { - throw new UnsupportedOperationException("Must be overridden"); - } - + ); } diff --git a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java index 904083e96032f..45d9d6cf255d6 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterChangedEvent.java @@ -216,16 +216,6 @@ public boolean localNodeClusterManager() { return state.nodes().isLocalNodeElectedClusterManager(); } - /** - * Returns true iff the local node is the mater node of the cluster. - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #localNodeClusterManager()} - */ - @Deprecated - public boolean localNodeMaster() { - return localNodeClusterManager(); - } - /** * Returns the {@link org.opensearch.cluster.node.DiscoveryNodes.Delta} between * the previous cluster state and the new cluster state. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java index 115816798959e..55351a2a998ec 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskExecutor.java @@ -59,16 +59,6 @@ default boolean runOnlyOnClusterManager() { return true; } - /** - * indicates whether this executor should only run if the current node is cluster-manager - * - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #runOnlyOnClusterManager()} - */ - @Deprecated - default boolean runOnlyOnMaster() { - return runOnlyOnClusterManager(); - } - /** * Callback invoked after new cluster state is published. Note that * this method is not invoked if the cluster state was not updated. diff --git a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java index 0cb24bd3f3eab..db81bcd3146fc 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterStateTaskListener.java @@ -57,17 +57,6 @@ default void onNoLongerClusterManager(String source) { onFailure(source, new NotClusterManagerException("no longer cluster-manager. source: [" + source + "]")); } - /** - * called when the task was rejected because the local node is no longer cluster-manager. - * Used only for tasks submitted to {@link ClusterManagerService}. - * - * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #onNoLongerClusterManager(String)} - */ - @Deprecated - default void onNoLongerMaster(String source) { - onNoLongerClusterManager(source); - } - /** * Called when the result of the {@link ClusterStateTaskExecutor#execute(ClusterState, List)} have been processed * properly by all listeners. 
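
[Editor's note] For downstream implementers tracking this commit: the task-listener callbacks now exist only under their cluster-manager names. Below is a minimal sketch, not taken from this patch, of a listener written against the surviving interface; the DemoTaskListener class name and the print statements are invented for illustration.

    import org.opensearch.cluster.ClusterState;
    import org.opensearch.cluster.ClusterStateTaskListener;

    public class DemoTaskListener implements ClusterStateTaskListener {

        @Override
        public void onNoLongerClusterManager(String source) {
            // Formerly reachable via the deprecated onNoLongerMaster(String),
            // which this commit removes; only this spelling remains.
            System.out.println("Task [" + source + "] dropped: node is no longer the elected cluster-manager");
        }

        @Override
        public void onFailure(String source, Exception e) {
            System.err.println("Task [" + source + "] failed: " + e.getMessage());
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            // Optional override; the interface provides a no-op default.
        }
    }
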
diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index 0b569901d0da1..91be118460912 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -131,19 +131,6 @@ public void updateMappingOnClusterManager(Index index, Mapping mappingUpdate, Ac } } - /** - * Update mappings on the cluster-manager node, waiting for the change to be committed, - * but not for the mapping update to be applied on all nodes. The timeout specified by - * {@code timeout} is the cluster-manager node timeout ({@link ClusterManagerNodeRequest#clusterManagerNodeTimeout()}), - * potentially waiting for a cluster-manager node to be available. - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #updateMappingOnClusterManager(Index, Mapping, ActionListener)} - */ - @Deprecated - public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { - updateMappingOnClusterManager(index, mappingUpdate, listener); - } - // used by tests int blockedThreads() { return semaphore.getQueueLength(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index ec30496a3f7ad..bf2545d059955 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -120,24 +120,13 @@ public String toString() { } public boolean isBecomeClusterManagerTask() { - return reason.equals(BECOME_MASTER_TASK_REASON) || reason.equals(BECOME_CLUSTER_MANAGER_TASK_REASON); - } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isBecomeClusterManagerTask()} */ - @Deprecated - public boolean isBecomeMasterTask() { - return isBecomeClusterManagerTask(); + return reason.equals(BECOME_CLUSTER_MANAGER_TASK_REASON); } public boolean isFinishElectionTask() { return reason.equals(FINISH_ELECTION_TASK_REASON); } - /** - * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #BECOME_CLUSTER_MANAGER_TASK_REASON} - */ - @Deprecated - private static final String BECOME_MASTER_TASK_REASON = "_BECOME_MASTER_TASK_"; private static final String BECOME_CLUSTER_MANAGER_TASK_REASON = "_BECOME_CLUSTER_MANAGER_TASK_"; private static final String FINISH_ELECTION_TASK_REASON = "_FINISH_ELECTION_"; } @@ -379,16 +368,6 @@ public boolean runOnlyOnClusterManager() { return false; } - /** - * a task indicates that the current node should become master - * - * @deprecated As of 2.0, because supporting inclusive language, replaced by {@link #newBecomeClusterManagerTask()} - */ - @Deprecated - public static Task newBecomeMasterTask() { - return new Task(null, Task.BECOME_MASTER_TASK_REASON); - } - /** * a task indicates that the current node should become cluster-manager */ diff --git a/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java index b377fe592b0f4..377ad68892441 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java +++ 
b/server/src/main/java/org/opensearch/cluster/coordination/NoClusterManagerBlockService.java @@ -76,19 +76,6 @@ public class NoClusterManagerBlockService { EnumSet.of(ClusterBlockLevel.METADATA_WRITE) ); - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_ID} */ - @Deprecated - public static final int NO_MASTER_BLOCK_ID = NO_CLUSTER_MANAGER_BLOCK_ID; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_WRITES} */ - @Deprecated - public static final ClusterBlock NO_MASTER_BLOCK_WRITES = NO_CLUSTER_MANAGER_BLOCK_WRITES; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_ALL} */ - @Deprecated - public static final ClusterBlock NO_MASTER_BLOCK_ALL = NO_CLUSTER_MANAGER_BLOCK_ALL; - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES} */ - @Deprecated - public static final ClusterBlock NO_MASTER_BLOCK_METADATA_WRITES = NO_CLUSTER_MANAGER_BLOCK_METADATA_WRITES; - public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>( "cluster.no_master_block", "metadata_write", @@ -133,12 +120,6 @@ public ClusterBlock getNoClusterManagerBlock() { return noClusterManagerBlock; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getNoClusterManagerBlock()} */ - @Deprecated - public ClusterBlock getNoMasterBlock() { - return noClusterManagerBlock; - } - private void setNoClusterManagerBlock(ClusterBlock noClusterManagerBlock) { this.noClusterManagerBlock = noClusterManagerBlock; } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index 8a70c71d53fdd..f0712f69b255c 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -80,15 +80,6 @@ public Optional getClusterManagerNode() { return clusterManagerNode; } - /** - * @return the node that is currently leading, according to the responding node. - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNode()} - */ - @Deprecated - public Optional getMasterNode() { - return getClusterManagerNode(); - } - /** * @return the collection of known peers of the responding node, or an empty collection if the responding node believes there * is currently a leader. 
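
[Editor's note] The block constants follow the same pattern: the NO_MASTER_BLOCK_* aliases removed above leave only the NO_CLUSTER_MANAGER_BLOCK_* spellings. A hedged caller-side sketch, assuming ClusterBlocks#hasGlobalBlockWithId(int) is available (this patch does not touch it) and with the wrapper class and helper name invented:

    import org.opensearch.cluster.ClusterState;
    import org.opensearch.cluster.coordination.NoClusterManagerBlockService;

    final class NoClusterManagerBlockDemo {
        // Returns true while the cluster has no elected cluster-manager and the
        // corresponding global block is in place.
        static boolean clusterManagerIsMissing(ClusterState state) {
            return state.blocks().hasGlobalBlockWithId(NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID);
        }
    }
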
diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java index f6cfdd3c42e0c..5eeac822e7c3e 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java @@ -331,12 +331,6 @@ public boolean hasDiscoveredClusterManager() { return hasDiscoveredClusterManager; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #hasDiscoveredClusterManager()} */ - @Deprecated - public boolean hasDiscoveredMaster() { - return hasDiscoveredClusterManager(); - } - @Override public Iterator iterator() { return indices.values().iterator(); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 3af18470df787..5d20388b74e1f 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1720,10 +1720,6 @@ public static class PutRequest { TimeValue clusterManagerTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; - /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ - @Deprecated - TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; - public PutRequest(String cause, String name) { this.cause = cause; this.name = name; @@ -1764,12 +1760,6 @@ public PutRequest clusterManagerTimeout(TimeValue clusterManagerTimeout) { return this; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerTimeout(TimeValue)} */ - @Deprecated - public PutRequest masterTimeout(TimeValue masterTimeout) { - return clusterManagerTimeout(masterTimeout); - } - public PutRequest version(Integer version) { this.version = version; return this; @@ -1802,10 +1792,6 @@ public static class RemoveRequest { final String name; TimeValue clusterManagerTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; - /** @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ - @Deprecated - TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_CLUSTER_MANAGER_NODE_TIMEOUT; - public RemoveRequest(String name) { this.name = name; } @@ -1814,12 +1800,6 @@ public RemoveRequest clusterManagerTimeout(TimeValue clusterManagerTimeout) { this.clusterManagerTimeout = clusterManagerTimeout; return this; } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerTimeout} */ - @Deprecated - public RemoveRequest masterTimeout(TimeValue masterTimeout) { - return clusterManagerTimeout(masterTimeout); - } } /** diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index d84fb794c5e4f..12cdafdcdbf1b 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -105,12 +105,6 @@ public static boolean isClusterManagerNode(Settings settings) { return hasRole(settings, DiscoveryNodeRole.MASTER_ROLE) || hasRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } - /** @deprecated As of 2.2, because 
supporting inclusive language, replaced by {@link #isClusterManagerNode(Settings)} */ - @Deprecated - public static boolean isMasterNode(Settings settings) { - return isClusterManagerNode(settings); - } - /** * Due to the way that plugins may not be available when settings are being initialized, * not all roles may be available from a static/initializing context such as a {@link Setting} @@ -469,16 +463,6 @@ public boolean isClusterManagerNode() { return roles.contains(DiscoveryNodeRole.MASTER_ROLE) || roles.contains(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } - /** - * Can this node become cluster-manager or not. - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerNode()} - */ - @Deprecated - public boolean isMasterNode() { - return isClusterManagerNode(); - } - /** * Returns a boolean that tells whether this an ingest node or not */ diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 52d830aafda38..b9169169703d4 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -124,16 +124,6 @@ public boolean isLocalNodeElectedClusterManager() { return localNodeId.equals(clusterManagerNodeId); } - /** - * Returns {@code true} if the local node is the elected cluster-manager node. - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} - */ - @Deprecated - public boolean isLocalNodeElectedMaster() { - return isLocalNodeElectedClusterManager(); - } - /** * Get the number of known nodes * @@ -170,17 +160,6 @@ public Map getClusterManagerNodes() { return this.clusterManagerNodes; } - /** - * Get a {@link Map} of the discovered cluster-manager nodes arranged by their ids - * - * @return {@link Map} of the discovered cluster-manager nodes arranged by their ids - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodes()} - */ - @Deprecated - public Map getMasterNodes() { - return getClusterManagerNodes(); - } - /** * @return All the ingest nodes arranged by their ids */ @@ -199,17 +178,6 @@ public Map getClusterManagerAndDataNodes() { return Collections.unmodifiableMap(nodes); } - /** - * Get a {@link Map} of the discovered cluster-manager and data nodes arranged by their ids - * - * @return {@link Map} of the discovered cluster-manager and data nodes arranged by their ids - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerAndDataNodes()} - */ - @Deprecated - public Map getMasterAndDataNodes() { - return getClusterManagerAndDataNodes(); - } - /** * Get a {@link Map} of the coordinating only nodes (nodes which are neither cluster-manager, nor data, nor ingest nodes) arranged by their ids * @@ -233,16 +201,6 @@ public Stream clusterManagersFirstStream() { ); } - /** - * Returns a stream of all nodes, with cluster-manager nodes at the front - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagersFirstStream()} - */ - @Deprecated - public Stream mastersFirstStream() { - return clusterManagersFirstStream(); - } - /** * Get a node by its id * @@ -292,17 +250,6 @@ public String getClusterManagerNodeId() { return this.clusterManagerNodeId; } - /** - * Get the id of the cluster-manager node - * - * @return id of the cluster-manager - * 
@deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeId()} - */ - @Deprecated - public String getMasterNodeId() { - return getClusterManagerNodeId(); - } - /** * Get the id of the local node * @@ -332,17 +279,6 @@ public DiscoveryNode getClusterManagerNode() { return null; } - /** - * Returns the cluster-manager node, or {@code null} if there is no cluster-manager node - * - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNode()} - */ - @Deprecated - @Nullable - public DiscoveryNode getMasterNode() { - return getClusterManagerNode(); - } - /** * Get a node by its address * @@ -606,36 +542,16 @@ public boolean clusterManagerNodeChanged() { return Objects.equals(newClusterManagerNode, previousClusterManagerNode) == false; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeChanged()} */ - @Deprecated - public boolean masterNodeChanged() { - return clusterManagerNodeChanged(); - } - @Nullable public DiscoveryNode previousClusterManagerNode() { return previousClusterManagerNode; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #previousClusterManagerNode()} */ - @Deprecated - @Nullable - public DiscoveryNode previousMasterNode() { - return previousClusterManagerNode(); - } - @Nullable public DiscoveryNode newClusterManagerNode() { return newClusterManagerNode; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #newClusterManagerNode()} */ - @Deprecated - @Nullable - public DiscoveryNode newMasterNode() { - return newClusterManagerNode(); - } - public boolean removed() { return !removed.isEmpty(); } @@ -855,12 +771,6 @@ public Builder clusterManagerNodeId(String clusterManagerNodeId) { return this; } - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNodeId} */ - @Deprecated - public Builder masterNodeId(String clusterManagerNodeId) { - return clusterManagerNodeId(clusterManagerNodeId); - } - public Builder localNodeId(String localNodeId) { this.localNodeId = localNodeId; return this; @@ -939,12 +849,6 @@ public DiscoveryNodes build() { public boolean isLocalNodeElectedClusterManager() { return clusterManagerNodeId != null && clusterManagerNodeId.equals(localNodeId); } - - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isLocalNodeElectedClusterManager()} */ - @Deprecated - public boolean isLocalNodeElectedMaster() { - return isLocalNodeElectedClusterManager(); - } } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java index 2e200b6f38612..43368218e900c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/DelayedAllocationService.java @@ -239,11 +239,4 @@ private synchronized void scheduleIfNeeded(long currentNanoTime, ClusterState st protected void assertClusterOrClusterManagerStateThread() { assert ClusterService.assertClusterOrClusterManagerStateThread(); } - - // protected so that it can be overridden (and disabled) by unit tests - /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #assertClusterOrClusterManagerStateThread()} */ - @Deprecated - protected void assertClusterOrMasterStateThread() { - 
assertClusterOrClusterManagerStateThread(); - } } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 57ade7fa10cd0..c0bb52b6b43bc 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -1586,22 +1586,6 @@ private boolean isReplicated( return true; } - /** - * Notifies the tracker of the current allocation IDs in the cluster state. - * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the cluster-manager - * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies - * @param routingTable the shard routing table - * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #updateFromClusterManager(long, Set, IndexShardRoutingTable)} - */ - @Deprecated - public synchronized void updateFromMaster( - final long applyingClusterStateVersion, - final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable - ) { - updateFromClusterManager(applyingClusterStateVersion, inSyncAllocationIds, routingTable); - } - /** * Called when the recovery process for a shard has opened the engine on the target shard. Ensures that the right data structures * have been set up locally to track local checkpoint information for the shard and that the shard is added to the replication group. diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java index 414c82b4a470f..e4c70ca28e2b4 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java @@ -40,8 +40,8 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { RestoreRemoteStoreRequest restoreRemoteStoreRequest = new RestoreRemoteStoreRequest(); - restoreRemoteStoreRequest.masterNodeTimeout( - request.paramAsTime("cluster_manager_timeout", restoreRemoteStoreRequest.masterNodeTimeout()) + restoreRemoteStoreRequest.clusterManagerNodeTimeout( + request.paramAsTime("cluster_manager_timeout", restoreRemoteStoreRequest.clusterManagerNodeTimeout()) ); restoreRemoteStoreRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false)); restoreRemoteStoreRequest.restoreAllShards(request.paramAsBoolean("restore_all_shards", false)); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java index 7fff55f1d1259..4a9c8b1bc031d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java @@ -41,7 +41,7 @@ private RestoreRemoteStoreRequest randomState(RestoreRemoteStoreRequest instance instance.restoreAllShards(randomBoolean()); if (randomBoolean()) { - instance.masterNodeTimeout(randomTimeValue()); + instance.clusterManagerNodeTimeout(randomTimeValue()); } return instance; 
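
[Editor's note] The timeout rename in the RestRestoreRemoteStoreAction hunk above has a matching caller-side shape. A brief sketch of client code updated for it; the wrapper class and the 30-second value are invented, the import paths are assumed, and the setter names are taken from the surviving API in this diff:

    import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
    import org.opensearch.common.unit.TimeValue;

    final class RestoreRequestDemo {
        static RestoreRemoteStoreRequest buildRequest() {
            RestoreRemoteStoreRequest request = new RestoreRemoteStoreRequest();
            // The deprecated masterNodeTimeout(TimeValue) spelling is being
            // phased out across this series; callers use the cluster-manager variant.
            request.clusterManagerNodeTimeout(TimeValue.timeValueSeconds(30));
            request.waitForCompletion(true);
            request.restoreAllShards(false);
            return request;
        }
    }
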
@@ -75,7 +75,7 @@ public void testSource() throws IOException { Map map = parser.mapOrdered(); RestoreRemoteStoreRequest processed = new RestoreRemoteStoreRequest(); - processed.masterNodeTimeout(original.masterNodeTimeout()); + processed.clusterManagerNodeTimeout(original.clusterManagerNodeTimeout()); processed.waitForCompletion(original.waitForCompletion()); processed.restoreAllShards(original.restoreAllShards()); processed.source(map); diff --git a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 00198364fc8d7..5075f64937508 100644 --- a/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -328,7 +328,7 @@ public void testDeprecatedMasterOperationWithTaskParameterCanBeCalled() throws E new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) { if (clusterManagerOperationFailure) { listener.onFailure(exception); } else { @@ -656,7 +656,7 @@ public void testThrottlingRetryLocalMaster() throws InterruptedException, Broken TransportClusterManagerNodeAction action = new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { + protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) { if (exception.getAndSet(false)) { throw new ClusterManagerThrottlingException("Throttling Exception : Limit exceeded for test"); } else { @@ -693,7 +693,7 @@ public void testThrottlingRetryRemoteMaster() throws ExecutionException, Interru CapturingTransport.CapturedRequest[] capturedRequests = transport.getCapturedRequestsAndClear(); assertThat(capturedRequests.length, equalTo(1)); CapturingTransport.CapturedRequest capturedRequest = capturedRequests[0]; - assertTrue(capturedRequest.node.isMasterNode()); + assertTrue(capturedRequest.node.isClusterManagerNode()); assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); transport.handleRemoteError( @@ -727,7 +727,7 @@ public void testRetryForDifferentException() throws InterruptedException, Broken TransportClusterManagerNodeAction action = new Action("internal:testAction", transportService, clusterService, threadPool) { @Override - protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) + protected void clusterManagerOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { if (exception.getAndSet(false)) { throw new Exception("Different exception"); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 9590e5615d451..9b91e4d507d57 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ 
b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -252,8 +252,6 @@ public void testUpdatesNodeWithNewRoles() throws Exception { * Validate isBecomeClusterManagerTask() can identify "become cluster manager task" properly */ public void testIsBecomeClusterManagerTask() { - JoinTaskExecutor.Task joinTaskOfMaster = JoinTaskExecutor.newBecomeMasterTask(); - assertThat(joinTaskOfMaster.isBecomeClusterManagerTask(), is(true)); JoinTaskExecutor.Task joinTaskOfClusterManager = JoinTaskExecutor.newBecomeClusterManagerTask(); assertThat(joinTaskOfClusterManager.isBecomeClusterManagerTask(), is(true)); } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index a1c914c69ce21..cee814b227534 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -779,7 +779,7 @@ public void testConcurrentJoining() { throw new RuntimeException(e); } - assertTrue(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); + assertTrue(ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedClusterManager()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); @@ -863,7 +863,7 @@ public void testJoinFailsWhenDecommissioned() { } private boolean isLocalNodeElectedMaster() { - return ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); + return ClusterManagerServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedClusterManager(); } private boolean clusterStateHasNode(DiscoveryNode node) { diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index b97656304c46f..b51cc26053651 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -399,13 +399,13 @@ public void offClusterManager() { ClusterState state = timedClusterApplierService.state(); DiscoveryNodes nodes = state.nodes(); - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(true)); nodes = state.nodes(); - nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null); + nodesBuilder = DiscoveryNodes.builder(nodes).clusterManagerNodeId(null); state = ClusterState.builder(state).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); assertThat(isClusterManager.get(), is(false)); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java index d1b06d24cc797..d9e6b2d90cfca 100644 --- 
a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerServiceTests.java
@@ -1288,7 +1288,7 @@ public void testLongClusterStateUpdateLogging() throws Exception {
         final ClusterState initialClusterState = ClusterState.builder(
             new ClusterName(ClusterManagerServiceTests.class.getSimpleName())
         )
-            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
+            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId()))
             .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
             .build();
         final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
@@ -1460,7 +1460,7 @@ public void testLongClusterStateUpdateLoggingForFailedPublication() throws Excep
         final ClusterState initialClusterState = ClusterState.builder(
             new ClusterName(ClusterManagerServiceTests.class.getSimpleName())
         )
-            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
+            .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).clusterManagerNodeId(localNode.getId()))
             .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
             .build();
         final AtomicReference<ClusterState> clusterStateRef = new AtomicReference<>(initialClusterState);
@@ -1543,7 +1543,9 @@ public void testAcking() throws InterruptedException {
         ) {
             final ClusterState initialClusterState = ClusterState.builder(new ClusterName(ClusterManagerServiceTests.class.getSimpleName()))
-                .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).masterNodeId(node1.getId()))
+                .nodes(
+                    DiscoveryNodes.builder().add(node1).add(node2).add(node3).localNodeId(node1.getId()).clusterManagerNodeId(node1.getId())
+                )
                 .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK)
                 .build();
             final AtomicReference<ClusterStatePublisher> publisherRef = new AtomicReference<>();
diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
index c536ce2597fd7..9ad42554a8404 100644
--- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java
@@ -591,11 +591,6 @@ public ClusterTasksResult execute(
             return null;
         }
 
-        @Override
-        public boolean runOnlyOnMaster() {
-            return true;
-        }
-
         @Override
         public void clusterStatePublished(ClusterChangedEvent clusterChangedEvent) {}
diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
index e271a0bc8ffa3..63758cb44dc5a 100644
--- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
+++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java
@@ -443,12 +443,6 @@ public ClusterState joinNodesAndBecomeClusterManager(ClusterState clusterState,
         return runTasks(joinTaskExecutor, clusterState, joinNodes);
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #joinNodesAndBecomeClusterManager(ClusterState, List)} */
-    @Deprecated
-    public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List<DiscoveryNode> nodes) {
-        return joinNodesAndBecomeClusterManager(clusterState, nodes);
-    }
-
     public ClusterState removeNodes(ClusterState clusterState, List<DiscoveryNode> nodes) {
         return runTasks(
             nodeRemovalExecutor,
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
index d153e8d6aef53..02b5164e3d822 100644
--- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -295,30 +295,6 @@ public static String blockClusterManagerFromFinalizingSnapshotOnSnapFile(final S
         return clusterManagerName;
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromFinalizingSnapshotOnIndexFile(String)} */
-    @Deprecated
-    public static String blockMasterFromFinalizingSnapshotOnIndexFile(final String repositoryName) {
-        return blockClusterManagerFromFinalizingSnapshotOnIndexFile(repositoryName);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerOnWriteIndexFile(String)} */
-    @Deprecated
-    public static String blockMasterOnWriteIndexFile(final String repositoryName) {
-        return blockClusterManagerOnWriteIndexFile(repositoryName);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromDeletingIndexNFile(String)} */
-    @Deprecated
-    public static void blockMasterFromDeletingIndexNFile(String repositoryName) {
-        blockClusterManagerFromDeletingIndexNFile(repositoryName);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #blockClusterManagerFromFinalizingSnapshotOnSnapFile(String)} */
-    @Deprecated
-    public static String blockMasterFromFinalizingSnapshotOnSnapFile(final String repositoryName) {
-        return blockClusterManagerFromFinalizingSnapshotOnSnapFile(repositoryName);
-    }
-
     public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
         for (String node : internalCluster().nodesInclude(indexName)) {
             ((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName)).blockOnDataFiles(
@@ -783,10 +759,4 @@ protected void awaitClusterManagerFinishRepoOperations() throws Exception {
             }
         });
     }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #awaitClusterManagerFinishRepoOperations()} */
-    @Deprecated
-    protected void awaitMasterFinishRepoOperations() throws Exception {
-        awaitClusterManagerFinishRepoOperations();
-    }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index 7b2c653e9bdb2..9ce9e293ef2b0 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -223,13 +223,6 @@ public final class InternalTestCluster extends TestCluster {
     public static final int DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES = 1;
     public static final int DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES = 3;
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES} */
-    @Deprecated
-    public static final int DEFAULT_LOW_NUM_MASTER_NODES = DEFAULT_LOW_NUM_CLUSTER_MANAGER_NODES;
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES} */
-    @Deprecated
-    public static final int DEFAULT_HIGH_NUM_MASTER_NODES = DEFAULT_HIGH_NUM_CLUSTER_MANAGER_NODES;
-
     static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
     static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
@@ -899,25 +892,6 @@ public Client nonClusterManagerClient() {
         throw new AssertionError("No non-cluster-manager client found");
     }
 
-    /**
-     * Returns a node client to the current cluster-manager node.
-     * Note: use this with care tests should not rely on a certain nodes client.
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerClient()}
-     */
-    @Deprecated
-    public Client masterClient() {
-        return clusterManagerClient();
-    }
-
-    /**
-     * Returns a node client to random node but not the cluster-manager. This method will fail if no non-cluster-manager client is available.
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerClient()}
-     */
-    @Deprecated
-    public Client nonMasterClient() {
-        return nonClusterManagerClient();
-    }
-
     /**
      * Returns a client to a coordinating only node
      */
@@ -976,11 +950,6 @@ public synchronized void close() throws IOException {
         }
     }
 
-    public static final int REMOVED_MINIMUM_CLUSTER_MANAGER_NODES = Integer.MAX_VALUE;
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #REMOVED_MINIMUM_CLUSTER_MANAGER_NODES} */
-    @Deprecated
-    public static final int REMOVED_MINIMUM_MASTER_NODES = REMOVED_MINIMUM_CLUSTER_MANAGER_NODES;
-
     private final class NodeAndClient implements Closeable {
         private MockNode node;
         private final Settings originalNodeSettings;
@@ -1016,12 +985,6 @@ public boolean isClusterManagerEligible() {
             return DiscoveryNode.isClusterManagerNode(node.settings());
         }
 
-        /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #isClusterManagerEligible()} */
-        @Deprecated
-        public boolean isMasterEligible() {
-            return isClusterManagerEligible();
-        }
-
         Client client() {
             return getOrBuildNodeClient();
         }
@@ -1663,23 +1626,6 @@ public <T> Iterable<T> getDataOrClusterManagerNodeInstances(Class<T> clazz) {
         return getInstances(clazz, DATA_NODE_PREDICATE.or(CLUSTER_MANAGER_NODE_PREDICATE));
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getCurrentClusterManagerNodeInstance(Class)} */
-    @Deprecated
-    public synchronized <T> T getCurrentMasterNodeInstance(Class<T> clazz) {
-        return getCurrentClusterManagerNodeInstance(clazz);
-    }
-
-    /**
-     * Returns an Iterable to all instances for the given class >T< across all data and cluster-manager nodes
-     * in the cluster.
-     *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getDataOrClusterManagerNodeInstances(Class)}
-     */
-    @Deprecated
-    public <T> Iterable<T> getDataOrMasterNodeInstances(Class<T> clazz) {
-        return getDataOrClusterManagerNodeInstances(clazz);
-    }
-
     private <T> Iterable<T> getInstances(Class<T> clazz, Predicate<NodeAndClient> predicate) {
         Iterable<NodeAndClient> filteredNodes = nodes.values().stream().filter(predicate)::iterator;
         List<T> instances = new ArrayList<>();
@@ -1704,12 +1650,6 @@ public <T> T getClusterManagerNodeInstance(Class<T> clazz) {
         return getInstance(clazz, CLUSTER_MANAGER_NODE_PREDICATE);
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerNodeInstance(Class)} */
-    @Deprecated
-    public <T> T getMasterNodeInstance(Class<T> clazz) {
-        return getClusterManagerNodeInstance(clazz);
-    }
-
     private synchronized <T> T getInstance(Class<T> clazz, Predicate<NodeAndClient> predicate) {
         NodeAndClient randomNodeAndClient = getRandomNodeAndClient(predicate);
         assert randomNodeAndClient != null;
@@ -1832,26 +1772,6 @@ public synchronized void stopRandomNodeNotCurrentClusterManager() throws IOExcep
         }
     }
 
-    /**
-     * Stops the current cluster-manager node forcefully.
-     *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopCurrentClusterManagerNode()}
-     */
-    @Deprecated
-    public synchronized void stopCurrentMasterNode() throws IOException {
-        stopCurrentClusterManagerNode();
-    }
-
-    /**
-     * Stops any of the current nodes but not the cluster-manager node.
-     *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #stopRandomNodeNotCurrentClusterManager()}
-     */
-    @Deprecated
-    public synchronized void stopRandomNodeNotCurrentMaster() throws IOException {
-        stopRandomNodeNotCurrentClusterManager();
-    }
-
     /**
      * Stops all running nodes in cluster
      */
@@ -2175,27 +2095,6 @@ public String getClusterManagerName(@Nullable String viaNode) {
         }
     }
 
-    /**
-     * Returns the name of the current cluster-manager node in the cluster.
-     *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerName()}
-     */
-    @Deprecated
-    public String getMasterName() {
-        return getClusterManagerName();
-    }
-
-    /**
-     * Returns the name of the current cluster-manager node in the cluster and executes the request via the node specified
-     * in the viaNode parameter. If viaNode isn't specified a random node will be picked to the send the request to.
-     *
-     * @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerName(String)}
-     */
-    @Deprecated
-    public String getMasterName(@Nullable String viaNode) {
-        return getClusterManagerName(viaNode);
-    }
-
     synchronized Set<String> allDataNodesButN(int count) {
         final int numNodes = numDataNodes() - count;
         assert size() >= numNodes;
@@ -2405,18 +2304,6 @@ public List<String> startClusterManagerOnlyNodes(int numNodes, Settings settings
         return startNodes(numNodes, Settings.builder().put(onlyRole(settings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)).build());
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNodes(int)} */
-    @Deprecated
-    public List<String> startMasterOnlyNodes(int numNodes) {
-        return startClusterManagerOnlyNodes(numNodes);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNodes(int, Settings)} */
-    @Deprecated
-    public List<String> startMasterOnlyNodes(int numNodes, Settings settings) {
-        return startClusterManagerOnlyNodes(numNodes, settings);
-    }
-
     public List<String> startDataAndSearchNodes(int numNodes) {
         return startDataAndSearchNodes(numNodes, Settings.EMPTY);
     }
@@ -2466,18 +2353,6 @@ public String startClusterManagerOnlyNode(Settings settings) {
         return startNode(settings1);
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNode()} */
-    @Deprecated
-    public String startMasterOnlyNode() {
-        return startClusterManagerOnlyNode();
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #startClusterManagerOnlyNode(Settings)} */
-    @Deprecated
-    public String startMasterOnlyNode(Settings settings) {
-        return startClusterManagerOnlyNode(settings);
-    }
-
     public String startDataOnlyNode() {
         return startDataOnlyNode(Settings.EMPTY);
     }
@@ -2521,12 +2396,6 @@ public int numClusterManagerNodes() {
         return filterNodes(nodes, NodeAndClient::isClusterManagerEligible).size();
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #numClusterManagerNodes()} */
-    @Deprecated
-    public int numMasterNodes() {
-        return numClusterManagerNodes();
-    }
-
     public void setDisruptionScheme(ServiceDisruptionScheme scheme) {
         assert activeDisruptionScheme == null : "there is already and active disruption ["
             + activeDisruptionScheme
diff --git a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java
index 4285ac76fc4d4..a3b4431c5aeb8 100644
--- a/test/framework/src/main/java/org/opensearch/test/NodeRoles.java
+++ b/test/framework/src/main/java/org/opensearch/test/NodeRoles.java
@@ -192,42 +192,6 @@ public static Settings nonClusterManagerNode(final Settings settings) {
         return removeRoles(settings, Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE));
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNode()} */
-    @Deprecated
-    public static Settings masterNode() {
-        return clusterManagerNode();
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerNode(Settings)} */
-    @Deprecated
-    public static Settings masterNode(final Settings settings) {
-        return clusterManagerNode(settings);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOnlyNode()} */
-    @Deprecated
-    public static Settings masterOnlyNode() {
-        return clusterManagerOnlyNode();
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerOnlyNode(Settings)} */
-    @Deprecated
-    public static Settings masterOnlyNode(final Settings settings) {
-        return clusterManagerOnlyNode(settings);
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerNode()} */
-    @Deprecated
-    public static Settings nonMasterNode() {
-        return nonClusterManagerNode();
-    }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #nonClusterManagerNode(Settings)} */
-    @Deprecated
-    public static Settings nonMasterNode(final Settings settings) {
-        return nonClusterManagerNode(settings);
-    }
-
     public static Settings remoteClusterClientNode() {
         return remoteClusterClientNode(Settings.EMPTY);
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
index f5a439e2ffd02..16cc797e6d792 100644
--- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java
@@ -129,19 +129,7 @@ public void assertAfterTest() throws Exception {
     /**
      * Returns the number of data and cluster-manager eligible nodes in the cluster.
     */
-    // TODO: Add abstract keyword after removing the deprecated numDataAndMasterNodes()
-    public int numDataAndClusterManagerNodes() {
-        return numDataAndMasterNodes();
-    }
-
-    /**
-     * Returns the number of data and cluster-manager eligible nodes in the cluster.
-     * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link #numDataAndClusterManagerNodes()}
-     */
-    @Deprecated
-    public int numDataAndMasterNodes() {
-        throw new UnsupportedOperationException("Must be overridden");
-    }
+    public abstract int numDataAndClusterManagerNodes();
 
     /**
      * Returns the http addresses of the nodes within the cluster.
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
index 849d7e4685a76..ec15e5bf03628 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java
@@ -109,12 +109,6 @@ public Version getClusterManagerVersion() {
         return clusterManagerVersion;
     }
 
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #getClusterManagerVersion()} */
-    @Deprecated
-    public Version getMasterVersion() {
-        return getClusterManagerVersion();
-    }
-
     /**
      * Calls an api with the provided parameters and body
      */
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
index 2095a8c5eaa1f..d150e214a49c3 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
@@ -231,11 +231,4 @@ public Version esVersion() {
     public Version clusterManagerVersion() {
         return clientYamlTestClient.getClusterManagerVersion();
     }
-
-    /** @deprecated As of 2.2, because supporting inclusive language, replaced by {@link #clusterManagerVersion()} */
-    @Deprecated
-    public Version masterVersion() {
-        return clusterManagerVersion();
-    }
-
 }

From ba6cf6b138b967686d98ade2363443409f83852d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Feb 2025 10:01:00 -0500
Subject: [PATCH 46/48] Bump org.awaitility:awaitility from 4.2.0 to 4.2.2 in /server (#17230)

* Bump org.awaitility:awaitility from 4.2.0 to 4.2.2 in /server

Bumps [org.awaitility:awaitility](https://github.com/awaitility/awaitility) from 4.2.0 to 4.2.2.
- [Changelog](https://github.com/awaitility/awaitility/blob/master/changelog.txt)
- [Commits](https://github.com/awaitility/awaitility/compare/awaitility-4.2.0...awaitility-4.2.2)

---
updated-dependencies:
- dependency-name: org.awaitility:awaitility
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md        | 1 +
 server/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index e2e6ef0307ba5..0076f44f268e3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -76,6 +76,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012))
 - Bump `com.squareup.okio:okio` from 3.9.1 to 3.10.2 ([#17060](https://github.com/opensearch-project/OpenSearch/pull/17060))
 - Bump `org.jruby.jcodings:jcodings` from 1.0.58 to 1.0.61 ([#17061](https://github.com/opensearch-project/OpenSearch/pull/17061))
+- Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/server/build.gradle b/server/build.gradle
index e7541cfdccdff..5d98874cbef23 100644
--- a/server/build.gradle
+++ b/server/build.gradle
@@ -113,7 +113,7 @@ dependencies {
   // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap
   api libs.roaringbitmap
 
-  testImplementation 'org.awaitility:awaitility:4.2.0'
+  testImplementation 'org.awaitility:awaitility:4.2.2'
   testImplementation(project(":test:framework")) {
     // tests use the locally compiled version of server
     exclude group: 'org.opensearch', module: 'server'

From bcf646d5cc60859e2ae009fa3c7d539537b58bfe Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Feb 2025 09:20:42 -0800
Subject: [PATCH 47/48] Bump dnsjava:dnsjava from 3.6.2 to 3.6.3 in /test/fixtures/hdfs-fixture (#17231)

---
 CHANGELOG.md                            | 1 +
 test/fixtures/hdfs-fixture/build.gradle | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0076f44f268e3..baced39bacfce 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -77,6 +77,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `com.squareup.okio:okio` from 3.9.1 to 3.10.2 ([#17060](https://github.com/opensearch-project/OpenSearch/pull/17060))
 - Bump `org.jruby.jcodings:jcodings` from 1.0.58 to 1.0.61 ([#17061](https://github.com/opensearch-project/OpenSearch/pull/17061))
 - Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230))
+- Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index ea26d24c862b0..45aff1b802051 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -55,7 +55,7 @@ dependencies {
     exclude group: 'com.nimbusds'
     exclude module: "commons-configuration2"
   }
-  api "dnsjava:dnsjava:3.6.2"
+  api "dnsjava:dnsjava:3.6.3"
   api "org.codehaus.jettison:jettison:${versions.jettison}"
   api "org.apache.commons:commons-compress:${versions.commonscompress}"
   api "commons-codec:commons-codec:${versions.commonscodec}"

From faabd10a28b1885fa25b206527b1b0cc5153f656 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 3 Feb 2025 13:57:41 -0500
Subject: [PATCH 48/48] Bump com.google.code.gson:gson from 2.11.0 to 2.12.1 in /plugins/repository-hdfs (#17229)

* Bump com.google.code.gson:gson in /plugins/repository-hdfs

Bumps [com.google.code.gson:gson](https://github.com/google/gson) from 2.11.0 to 2.12.1.
- [Release notes](https://github.com/google/gson/releases)
- [Changelog](https://github.com/google/gson/blob/main/CHANGELOG.md)
- [Commits](https://github.com/google/gson/compare/gson-parent-2.11.0...gson-parent-2.12.1)

---
updated-dependencies:
- dependency-name: com.google.code.gson:gson
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Updating SHAs

Signed-off-by: dependabot[bot] <support@github.com>

* Update changelog

Signed-off-by: dependabot[bot] <support@github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 CHANGELOG.md                                          | 1 +
 plugins/repository-hdfs/build.gradle                  | 2 +-
 plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 | 1 -
 plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 | 1 +
 4 files changed, 3 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1
 create mode 100644 plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1

diff --git a/CHANGELOG.md b/CHANGELOG.md
index baced39bacfce..9d1f544f2f03a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -78,6 +78,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `org.jruby.jcodings:jcodings` from 1.0.58 to 1.0.61 ([#17061](https://github.com/opensearch-project/OpenSearch/pull/17061))
 - Bump `org.awaitility:awaitility` from 4.2.0 to 4.2.2 ([#17230](https://github.com/opensearch-project/OpenSearch/pull/17230))
 - Bump `dnsjava:dnsjava` from 3.6.2 to 3.6.3 ([#17231](https://github.com/opensearch-project/OpenSearch/pull/17231))
+- Bump `com.google.code.gson:gson` from 2.11.0 to 2.12.1 ([#17229](https://github.com/opensearch-project/OpenSearch/pull/17229))
 
 ### Changed
 - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391)
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index c2685a525c8ba..cf76c88c5482e 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -67,7 +67,7 @@ dependencies {
   api 'org.apache.htrace:htrace-core4:4.2.0-incubating'
   api "org.apache.logging.log4j:log4j-core:${versions.log4j}"
   api 'org.apache.avro:avro:1.12.0'
-  api 'com.google.code.gson:gson:2.11.0'
+  api 'com.google.code.gson:gson:2.12.1'
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   api "commons-logging:commons-logging:${versions.commonslogging}"
   api 'commons-cli:commons-cli:1.9.0'
diff --git a/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1
deleted file mode 100644
index 0414a49526895..0000000000000
--- a/plugins/repository-hdfs/licenses/gson-2.11.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-527175ca6d81050b53bdd4c457a6d6e017626b0e
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1 b/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1
new file mode 100644
index 0000000000000..7d57e885daa08
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/gson-2.12.1.jar.sha1
@@ -0,0 +1 @@
+4e773a317740b83b43cfc3d652962856041697cb
\ No newline at end of file