From 1f94b34197347ef2027170aae455c62386b2a342 Mon Sep 17 00:00:00 2001
From: Rishabh Singh
Date: Fri, 3 Jan 2025 13:02:37 -0800
Subject: [PATCH 01/37] Add benchmark confirm for lucene-10 big5 index snapshot (#16940)

Signed-off-by: Rishabh Singh
---
 .github/benchmark-configs.json | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json
index 732f2f9b96ae3..b3590f8a2f942 100644
--- a/.github/benchmark-configs.json
+++ b/.github/benchmark-configs.json
@@ -239,5 +239,22 @@
       "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
     },
     "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
+  },
+  "id_15": {
+    "description": "Search only test-procedure for big5, uses lucene-10 index snapshot to restore the data for OS-3.0.0",
+    "supported_major_versions": ["3"],
+    "cluster-benchmark-configs": {
+      "SINGLE_NODE_CLUSTER": "true",
+      "MIN_DISTRIBUTION": "true",
+      "TEST_WORKLOAD": "big5",
+      "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-3x\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-3x\",\"snapshot_name\":\"big5_1_shard_single_client\"}",
+      "CAPTURE_NODE_STAT": "true",
+      "TEST_PROCEDURE": "restore-from-snapshot"
+    },
+    "cluster_configuration": {
+      "size": "Single-Node",
+      "data_instance_config": "4vCPU, 32G Mem, 16G Heap"
+    },
+    "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline"
   }
 }

From 703eddab489607ee25a7db8428c076d89826b7c6 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Fri, 3 Jan 2025 18:07:04 -0500
Subject: [PATCH 02/37] Remove duplicate DCO check (#16942)

Signed-off-by: Andriy Redko
---
 .github/workflows/dco.yml | 19 -------------------
 1 file changed, 19 deletions(-)
 delete mode 100644 .github/workflows/dco.yml

diff --git a/.github/workflows/dco.yml b/.github/workflows/dco.yml
deleted file mode 100644
index ef842bb405d60..0000000000000
--- a/.github/workflows/dco.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: Developer Certificate of Origin Check
-
-on: [pull_request]
-
-jobs:
-  dco-check:
-    runs-on: ubuntu-latest
-
-    steps:
-    - name: Get PR Commits
-      id: 'get-pr-commits'
-      uses: tim-actions/get-pr-commits@v1.3.1
-      with:
-        token: ${{ secrets.GITHUB_TOKEN }}
-    - name: DCO Check
-      uses: tim-actions/dco@v1.1.0
-      with:
-        commits: ${{ steps.get-pr-commits.outputs.commits }}
-

From 845fbfa10407f3264e6aab8812eff4ef0ad8be24 Mon Sep 17 00:00:00 2001
From: Craig Perkins
Date: Fri, 3 Jan 2025 18:11:49 -0500
Subject: [PATCH 03/37] Allow extended plugins to be optional (#16909)

* Make extended plugins optional

Signed-off-by: Craig Perkins

* Make extended plugins optional

Signed-off-by: Craig Perkins

* Load extensions for classpath plugins

Signed-off-by: Craig Perkins

* Ensure only single instance for each classpath extension

Signed-off-by: Craig Perkins

* Add test for classpath plugin extended plugin loading

Signed-off-by: Craig Perkins

* Modify test to allow optional extended plugin

Signed-off-by: Craig Perkins

* Only optional extended plugins

Signed-off-by: Craig Perkins

* Add additional warning message

Signed-off-by: Craig Perkins

* Add to CHANGELOG

Signed-off-by: Craig Perkins

* Add tag to make extended plugin optional

Signed-off-by: Craig Perkins

* Only send plugin names when serializing PluginInfo

Signed-off-by: Craig Perkins

* Keep track of optional extended plugins in separate set

Signed-off-by: Craig Perkins

* Include in ser/de of PluginInfo

Signed-off-by: Craig
Perkins * Change to 3_0_0 Signed-off-by: Craig Perkins --------- Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../org/opensearch/plugins/PluginInfo.java | 33 +++++++++++++++++-- .../opensearch/plugins/PluginsService.java | 15 ++++++++- .../opensearch/plugins/PluginInfoTests.java | 27 +++++++++++++++ .../plugins/PluginsServiceTests.java | 29 +++++++++++++++- 5 files changed, 101 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 45bc56b505eb3..5f813fecf66cf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) +- Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index b6030f4ded5e5..7173a653ebc9a 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -86,6 +86,8 @@ public class PluginInfo implements Writeable, ToXContentObject { private final String classname; private final String customFolderName; private final List extendedPlugins; + // Optional extended plugins are a subset of extendedPlugins that only contains the optional extended plugins + private final List optionalExtendedPlugins; private final boolean hasNativeController; /** @@ -149,7 +151,11 @@ public PluginInfo( this.javaVersion = javaVersion; this.classname = classname; this.customFolderName = customFolderName; - this.extendedPlugins = Collections.unmodifiableList(extendedPlugins); + this.extendedPlugins = extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); + this.optionalExtendedPlugins = extendedPlugins.stream() + .filter(PluginInfo::isOptionalExtension) + .map(s -> s.split(";")[0]) + .collect(Collectors.toUnmodifiableList()); this.hasNativeController = hasNativeController; } @@ -209,6 +215,16 @@ public PluginInfo(final StreamInput in) throws IOException { this.customFolderName = in.readString(); this.extendedPlugins = in.readStringList(); this.hasNativeController = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + this.optionalExtendedPlugins = in.readStringList(); + } else { + this.optionalExtendedPlugins = new ArrayList<>(); + } + } + + static boolean isOptionalExtension(String extendedPlugin) { + String[] dependency = extendedPlugin.split(";"); + return dependency.length > 1 && "optional=true".equals(dependency[1]); } @Override @@ -234,6 +250,9 @@ This works for currently supported range notations (=,~) } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeStringCollection(optionalExtendedPlugins); + } } /** @@ -417,8 +436,17 @@ public String getFolderName() { * * @return the names of the plugins extended */ + public boolean isExtendedPluginOptional(String extendedPlugin) { + return 
optionalExtendedPlugins.contains(extendedPlugin); + } + + /** + * Other plugins this plugin extends through SPI + * + * @return the names of the plugins extended + */ public List getExtendedPlugins() { - return extendedPlugins; + return extendedPlugins.stream().map(s -> s.split(";")[0]).collect(Collectors.toUnmodifiableList()); } /** @@ -493,6 +521,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("custom_foldername", customFolderName); builder.field("extended_plugins", extendedPlugins); builder.field("has_native_controller", hasNativeController); + builder.field("optional_extended_plugins", optionalExtendedPlugins); } builder.endObject(); diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index f08c9c738f1b4..9bc1f1334122e 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -524,7 +524,13 @@ private static void addSortedBundle( for (String dependency : bundle.plugin.getExtendedPlugins()) { Bundle depBundle = bundles.get(dependency); if (depBundle == null) { - throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + if (bundle.plugin.isExtendedPluginOptional(dependency)) { + logger.warn("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + logger.warn("Some features of this plugin may not function without the dependencies being installed.\n"); + continue; + } else { + throw new IllegalArgumentException("Missing plugin [" + dependency + "], dependency of [" + name + "]"); + } } addSortedBundle(depBundle, bundles, sortedBundles, dependencyStack); assert sortedBundles.contains(depBundle); @@ -653,6 +659,9 @@ static void checkBundleJarHell(Set classpath, Bundle bundle, Map urls = new HashSet<>(); for (String extendedPlugin : exts) { Set pluginUrls = transitiveUrls.get(extendedPlugin); + if (pluginUrls == null && bundle.plugin.isExtendedPluginOptional(extendedPlugin)) { + continue; + } assert pluginUrls != null : "transitive urls should have already been set for " + extendedPlugin; Set intersection = new HashSet<>(urls); @@ -704,6 +713,10 @@ private Plugin loadBundle(Bundle bundle, Map loaded) { List extendedLoaders = new ArrayList<>(); for (String extendedPluginName : bundle.plugin.getExtendedPlugins()) { Plugin extendedPlugin = loaded.get(extendedPluginName); + if (extendedPlugin == null && bundle.plugin.isExtendedPluginOptional(extendedPluginName)) { + // extended plugin is optional and is not installed + continue; + } assert extendedPlugin != null; if (ExtensiblePlugin.class.isInstance(extendedPlugin) == false) { throw new IllegalStateException("Plugin [" + name + "] cannot extend non-extensible plugin [" + extendedPluginName + "]"); diff --git a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java index 12c7dc870c104..76294d85c64d4 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginInfoTests.java @@ -44,6 +44,7 @@ import org.opensearch.semver.SemverRange; import org.opensearch.test.OpenSearchTestCase; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Path; import java.util.ArrayList; @@ -55,6 +56,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import 
static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class PluginInfoTests extends OpenSearchTestCase { @@ -281,6 +283,30 @@ public void testReadFromPropertiesJvmMissingClassname() throws Exception { assertThat(e.getMessage(), containsString("property [classname] is missing")); } + public void testExtendedPluginsSingleOptionalExtension() throws IOException { + Path pluginDir = createTempDir().resolve("fake-plugin"); + PluginTestUtil.writePluginProperties( + pluginDir, + "description", + "fake desc", + "name", + "my_plugin", + "version", + "1.0", + "opensearch.version", + Version.CURRENT.toString(), + "java.version", + System.getProperty("java.specification.version"), + "classname", + "FakePlugin", + "extended.plugins", + "foo;optional=true" + ); + PluginInfo info = PluginInfo.readFromProperties(pluginDir); + assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(true)); + } + public void testExtendedPluginsSingleExtension() throws Exception { Path pluginDir = createTempDir().resolve("fake-plugin"); PluginTestUtil.writePluginProperties( @@ -302,6 +328,7 @@ public void testExtendedPluginsSingleExtension() throws Exception { ); PluginInfo info = PluginInfo.readFromProperties(pluginDir); assertThat(info.getExtendedPlugins(), contains("foo")); + assertThat(info.isExtendedPluginOptional("foo"), is(false)); } public void testExtendedPluginsMultipleExtensions() throws Exception { diff --git a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java index bd9ee33856f14..f5702fa1a7ade 100644 --- a/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java +++ b/server/src/test/java/org/opensearch/plugins/PluginsServiceTests.java @@ -361,7 +361,7 @@ public void testSortBundlesNoDeps() throws Exception { assertThat(sortedBundles, Matchers.contains(bundle1, bundle2, bundle3)); } - public void testSortBundlesMissingDep() throws Exception { + public void testSortBundlesMissingRequiredDep() throws Exception { Path pluginDir = createTempDir(); PluginInfo info = new PluginInfo("foo", "desc", "1.0", Version.CURRENT, "1.8", "MyPlugin", Collections.singletonList("dne"), false); PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); @@ -372,6 +372,33 @@ public void testSortBundlesMissingDep() throws Exception { assertEquals("Missing plugin [dne], dependency of [foo]", e.getMessage()); } + public void testSortBundlesMissingOptionalDep() throws Exception { + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(PluginsService.class))) { + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "[.test] warning", + "org.opensearch.plugins.PluginsService", + Level.WARN, + "Missing plugin [dne], dependency of [foo]" + ) + ); + Path pluginDir = createTempDir(); + PluginInfo info = new PluginInfo( + "foo", + "desc", + "1.0", + Version.CURRENT, + "1.8", + "MyPlugin", + Collections.singletonList("dne;optional=true"), + false + ); + PluginsService.Bundle bundle = new PluginsService.Bundle(info, pluginDir); + PluginsService.sortBundles(Collections.singleton(bundle)); + mockLogAppender.assertAllExpectationsMatched(); + } + } + public void testSortBundlesCommonDep() throws Exception { Path pluginDir = createTempDir(); Set bundles = new LinkedHashSet<>(); // control iteration order From c0f7806753c74776465bb483f0201bc5897c15a2 Mon Sep 17 00:00:00 2001 From: Craig 
Perkins Date: Fri, 3 Jan 2025 21:48:15 -0500 Subject: [PATCH 04/37] Change version in PluginInfo to V_2_19_0 after backport to 2.x merged (#16947) Signed-off-by: Craig Perkins --- server/src/main/java/org/opensearch/plugins/PluginInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/plugins/PluginInfo.java b/server/src/main/java/org/opensearch/plugins/PluginInfo.java index 7173a653ebc9a..4ff699e8017ba 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginInfo.java +++ b/server/src/main/java/org/opensearch/plugins/PluginInfo.java @@ -215,7 +215,7 @@ public PluginInfo(final StreamInput in) throws IOException { this.customFolderName = in.readString(); this.extendedPlugins = in.readStringList(); this.hasNativeController = in.readBoolean(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { this.optionalExtendedPlugins = in.readStringList(); } else { this.optionalExtendedPlugins = new ArrayList<>(); @@ -250,7 +250,7 @@ This works for currently supported range notations (=,~) } out.writeStringCollection(extendedPlugins); out.writeBoolean(hasNativeController); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { out.writeStringCollection(optionalExtendedPlugins); } } From d7641ca8788441e384fbde6c58b6f2530ec8772d Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 6 Jan 2025 14:06:16 +0530 Subject: [PATCH 05/37] Support object fields in star-tree index (#16728) --------- Signed-off-by: bharath-techie --- CHANGELOG.md | 2 + .../index/mapper/StarTreeMapperIT.java | 371 +++++++++++++++++- .../index/mapper/DocumentParser.java | 14 +- .../index/mapper/MapperService.java | 25 ++ .../index/mapper/StarTreeMapper.java | 43 +- .../index/mapper/StarTreeMapperTests.java | 79 +++- 6 files changed, 528 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5f813fecf66cf..0efb53beb6e31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce a setting to disable download of full cluster state from remote on term mismatch([#16798](https://github.com/opensearch-project/OpenSearch/pull/16798/)) - Added ability to retrieve value from DocValues in a flat_object filed([#16802](https://github.com/opensearch-project/OpenSearch/pull/16802)) - Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534)) +- Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/)) +- Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java index 3f9053576329c..1d01f717aad1f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/StarTreeMapperIT.java @@ -26,6 +26,8 @@ import org.opensearch.index.compositeindex.datacube.DataCubeDateTimeUnit; import 
org.opensearch.index.compositeindex.datacube.DateDimension; import org.opensearch.index.compositeindex.datacube.MetricStat; +import org.opensearch.index.compositeindex.datacube.NumericDimension; +import org.opensearch.index.compositeindex.datacube.OrdinalDimension; import org.opensearch.index.compositeindex.datacube.startree.StarTreeFieldConfiguration; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; import org.opensearch.index.compositeindex.datacube.startree.utils.date.DateTimeUnitAdapter; @@ -41,6 +43,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Set; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -121,6 +124,187 @@ private static XContentBuilder createMinimalTestMapping(boolean invalidDim, bool } } + private static XContentBuilder createNestedTestMapping() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + .startArray("ordered_dimensions") + .startObject() + .field("name", "nested.nested1.status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + .endObject() + .endArray() + .endObject() + .endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + + private static XContentBuilder createNestedTestMappingForArray() { + try { + return jsonBuilder().startObject() + .startObject("composite") + .startObject("startree-1") + .field("type", "star_tree") + .startObject("config") + .startObject("date_dimension") + .field("name", "timestamp") + .endObject() + .startArray("ordered_dimensions") + .startObject() + .field("name", "status") + .endObject() + .startObject() + .field("name", "nested.nested1.keyword_dv") + .endObject() + .endArray() + .startArray("metrics") + .startObject() + .field("name", "nested3.numeric_dv") + .endObject() + .endArray() + .endObject() + 
.endObject() + .endObject() + .startObject("properties") + .startObject("timestamp") + .field("type", "date") + .endObject() + .startObject("status") + .field("type", "integer") + .endObject() + .startObject("nested3") + .startObject("properties") + .startObject("numeric_dv") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .startObject("numeric") + .field("type", "integer") + .field("doc_values", false) + .endObject() + .startObject("nested") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("nested-not-startree") + .startObject("properties") + .startObject("nested1") + .startObject("properties") + .startObject("status") + .field("type", "integer") + .field("doc_values", true) + .endObject() + .startObject("keyword_dv") + .field("type", "keyword") + .field("doc_values", true) + .endObject() + .endObject() + .endObject() + .endObject() + .endObject() + .startObject("keyword") + .field("type", "keyword") + .field("doc_values", false) + .endObject() + .startObject("ip") + .field("type", "ip") + .field("doc_values", false) + .endObject() + .endObject() + .endObject(); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } + private static XContentBuilder createDateTestMapping(boolean duplicate) { try { return jsonBuilder().startObject() @@ -475,6 +659,46 @@ public void testValidCompositeIndexWithDates() { } } + public void testValidCompositeIndexWithNestedFields() { + prepareCreate(TEST_INDEX).setMapping(createNestedTestMapping()).setSettings(settings).get(); + Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); + for (IndicesService service : dataNodeInstances) { + final Index index = resolveIndex("test"); + if (service.hasIndex(index)) { + IndexService indexService = service.indexService(index); + Set fts = indexService.mapperService().getCompositeFieldTypes(); + + for (CompositeMappedFieldType ft : fts) { + assertTrue(ft instanceof StarTreeMapper.StarTreeFieldType); + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) ft; + assertEquals("timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedTimeUnits = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < dateDim.getIntervals().size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.nested1.status", starTreeFieldType.getDimensions().get(1).getField()); + assertTrue(starTreeFieldType.getDimensions().get(1) instanceof NumericDimension); + assertEquals("nested.nested1.keyword_dv", starTreeFieldType.getDimensions().get(2).getField()); + assertTrue(starTreeFieldType.getDimensions().get(2) instanceof OrdinalDimension); + assertEquals("nested3.numeric_dv", starTreeFieldType.getMetrics().get(0).getField()); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + 
assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals( + StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, + starTreeFieldType.getStarTreeConfig().getBuildMode() + ); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + } + } + public void testValidCompositeIndexWithDuplicateDates() { prepareCreate(TEST_INDEX).setMapping(createDateTestMapping(true)).setSettings(settings).get(); Iterable dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class); @@ -563,11 +787,156 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() ); assertEquals( - "object mapping for [_doc] with array for [numeric_dv] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [numeric_dv] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); } + public void testCompositeIndexWithArraysInNestedCompositeField() throws IOException { + // here nested.nested1.status is part of the composite field but "nested" field itself is an array + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startArray("nested") + .startObject() + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endArray() + .endObject(); + // Index the document and refresh + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [_doc] with array for [nested] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInChildNestedCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // here nested.nested1.status is part of the composite field but "nested.nested1" field is an array + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 10) + .endObject() + .endArray() + .endObject() + .endObject(); + // Index the document and refresh + MapperParsingException ex = expectThrows( + MapperParsingException.class, + () -> client().prepareIndex(TEST_INDEX).setSource(doc).get() + ); + assertEquals( + "object mapping for [nested] with array for [nested1] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", + ex.getMessage() + ); + } + + public void testCompositeIndexWithArraysInNestedCompositeFieldSameNameAsNormalField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMappingForArray()).get(); + // here status 
is part of the composite field but "nested.nested1.status" field is an array which is not + // part of composite field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested") + .startObject("nested1") + .startArray("status") + .value(10) + .value(20) + .value(30) + .endArray() + .endObject() + .endObject() + .field("status", "200") + .endObject(); + // Index the document and refresh + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + int values = Integer.parseInt((String) hit.getSourceAsMap().get("status")); + assertEquals(200, values); + } + + public void testCompositeIndexWithNestedArraysInNonCompositeField() throws IOException { + prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createNestedTestMapping()).get(); + // Attempt to index a document with an array field + XContentBuilder doc = jsonBuilder().startObject() + .field("timestamp", "2023-06-01T12:00:00Z") + .startObject("nested-not-startree") + .startArray("nested1") + .startObject() + .field("status", 10) + .endObject() + .startObject() + .field("status", 20) + .endObject() + .startObject() + .field("status", 30) + .endObject() + .endArray() + .endObject() + .endObject(); + + // Index the document and refresh + IndexResponse indexResponse = client().prepareIndex(TEST_INDEX).setSource(doc).get(); + + assertEquals(RestStatus.CREATED, indexResponse.status()); + + client().admin().indices().prepareRefresh(TEST_INDEX).get(); + // Verify the document was indexed + SearchResponse searchResponse = client().prepareSearch(TEST_INDEX).setQuery(QueryBuilders.matchAllQuery()).get(); + + assertEquals(1, searchResponse.getHits().getTotalHits().value); + + // Verify the values in the indexed document + SearchHit hit = searchResponse.getHits().getAt(0); + assertEquals("2023-06-01T12:00:00Z", hit.getSourceAsMap().get("timestamp")); + + List values = (List) ((Map) (hit.getSourceAsMap().get("nested-not-startree"))).get("nested1"); + assertEquals(3, values.size()); + int i = 1; + for (Object val : values) { + Map valMap = (Map) val; + assertEquals(10 * i, valMap.get("status")); + i++; + } + } + public void testCompositeIndexWithArraysInNonCompositeField() throws IOException { prepareCreate(TEST_INDEX).setSettings(settings).setMapping(createMinimalTestMapping(false, false, false)).get(); // Attempt to index a document with an array field diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java index 50ff816695156..134baa70f80c2 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentParser.java @@ -661,12 +661,22 @@ private static void parseNonDynamicArray(ParseContext context, ObjectMapper mapp throws IOException { XContentParser parser = context.parser(); XContentParser.Token token; + String path = 
context.path().pathAsText(arrayFieldName); + boolean isNested = path.contains(".") || context.mapperService().isCompositeIndexFieldNestedField(path); // block array values for composite index fields - if (context.indexSettings().isCompositeIndex() && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) { + // Assume original index has 2 fields - status , nested.nested1.status + // case 1 : if status is part of composite index and nested.nested1.status is not part of composite index, + // then nested.nested1.status/nested.nested1/nested array should not be blocked + // case 2 : if nested.nested1.status is part of composite index and status is not part of composite index, + // then arrays in nested/nested.nested1 and nested.nested1.status fields should be blocked + // but arrays in status should not be blocked + if (context.indexSettings().isCompositeIndex() + && ((isNested == false && context.mapperService().isFieldPartOfCompositeIndex(arrayFieldName)) + || (isNested && context.mapperService().isCompositeIndexFieldNestedField(path)))) { throw new MapperParsingException( String.format( Locale.ROOT, - "object mapping for [%s] with array for [%s] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [%s] with array for [%s] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", mapper.name(), arrayFieldName ) diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index 84b0b1d69432d..5a7c6a0102052 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -228,6 +228,7 @@ public enum MergeReason { private volatile Set compositeMappedFieldTypes; private volatile Set fieldsPartOfCompositeMappings; + private volatile Set nestedFieldsPartOfCompositeMappings; public MapperService( IndexSettings indexSettings, @@ -554,10 +555,29 @@ private synchronized Map internalMerge(DocumentMapper ma private void buildCompositeFieldLookup() { Set fieldsPartOfCompositeMappings = new HashSet<>(); + Set nestedFieldsPartOfCompositeMappings = new HashSet<>(); + for (CompositeMappedFieldType fieldType : compositeMappedFieldTypes) { fieldsPartOfCompositeMappings.addAll(fieldType.fields()); + + for (String field : fieldType.fields()) { + String[] parts = field.split("\\."); + if (parts.length > 1) { + StringBuilder path = new StringBuilder(); + for (int i = 0; i < parts.length; i++) { + if (i == 0) { + path.append(parts[i]); + } else { + path.append(".").append(parts[i]); + } + nestedFieldsPartOfCompositeMappings.add(path.toString()); + } + } + } } + this.fieldsPartOfCompositeMappings = fieldsPartOfCompositeMappings; + this.nestedFieldsPartOfCompositeMappings = nestedFieldsPartOfCompositeMappings; } private boolean assertSerialization(DocumentMapper mapper) { @@ -690,6 +710,11 @@ public boolean isFieldPartOfCompositeIndex(String field) { return fieldsPartOfCompositeMappings.contains(field); } + public boolean isCompositeIndexFieldNestedField(String field) { + return nestedFieldsPartOfCompositeMappings.contains(field); + + } + public ObjectMapper getObjectMapper(String name) { return this.mapper == null ? 
null : this.mapper.objectMappers().get(name); } diff --git a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java index 40f05a8b76755..7b361e12330a3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/StarTreeMapper.java @@ -23,6 +23,7 @@ import org.opensearch.search.lookup.SearchLookup; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -431,8 +432,46 @@ private static boolean isBuilderAllowedForMetric(Mapper.Builder builder) { return builder.isDataCubeMetricSupported(); } - private Optional findMapperBuilderByName(String field, List mappersBuilders) { - return mappersBuilders.stream().filter(builder -> builder.name().equals(field)).findFirst(); + private Optional findMapperBuilderByName(String name, List mappersBuilders) { + String[] parts = name.split("\\."); + + // Start with the top-level builders + Optional currentBuilder = mappersBuilders.stream() + .filter(builder -> builder.name().equals(parts[0])) + .findFirst(); + + // If we can't find the first part, or if there's only one part, return the result + if (currentBuilder.isEmpty() || parts.length == 1) { + return currentBuilder; + } + + // Navigate through the nested structure + try { + Mapper.Builder builder = currentBuilder.get(); + for (int i = 1; i < parts.length; i++) { + List childBuilders = getChildBuilders(builder); + int finalI = i; + builder = childBuilders.stream() + .filter(b -> b.name().equals(parts[finalI])) + .findFirst() + .orElseThrow( + () -> new IllegalArgumentException( + String.format(Locale.ROOT, "Could not find nested field [%s] in path [%s]", parts[finalI], name) + ) + ); + } + return Optional.of(builder); + } catch (Exception e) { + return Optional.empty(); + } + } + + // Helper method to get child builders from a parent builder + private List getChildBuilders(Mapper.Builder builder) { + if (builder instanceof ObjectMapper.Builder) { + return ((ObjectMapper.Builder) builder).mappersBuilders; + } + return Collections.emptyList(); } public Builder(String name, ObjectMapper.Builder objBuilder) { diff --git a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java index 333cdbcab05c5..684704ad65b0a 100644 --- a/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/StarTreeMapperTests.java @@ -111,7 +111,7 @@ public void testCompositeIndexWithArraysInCompositeField() throws IOException { () -> mapper.parse(source(b -> b.startArray("status").value(0).value(1).endArray())) ); assertEquals( - "object mapping for [_doc] with array for [status] cannot be accepted as field is also part of composite index mapping which does not accept arrays", + "object mapping for [_doc] with array for [status] cannot be accepted, as the field is also part of composite index mapping which does not accept arrays", ex.getMessage() ); ParsedDocument doc = mapper.parse(source(b -> b.startArray("size").value(0).value(1).endArray())); @@ -284,6 +284,33 @@ public void testValidStarTreeDateDims() throws IOException { } } + public void testValidStarTreeNestedFields() throws IOException { + MapperService mapperService = createMapperService(getMinMappingWithNestedField()); + Set compositeFieldTypes = 
mapperService.getCompositeFieldTypes(); + for (CompositeMappedFieldType type : compositeFieldTypes) { + StarTreeMapper.StarTreeFieldType starTreeFieldType = (StarTreeMapper.StarTreeFieldType) type; + assertEquals("@timestamp", starTreeFieldType.getDimensions().get(0).getField()); + assertTrue(starTreeFieldType.getDimensions().get(0) instanceof DateDimension); + DateDimension dateDim = (DateDimension) starTreeFieldType.getDimensions().get(0); + List expectedDimensionFields = Arrays.asList("@timestamp_minute", "@timestamp_half-hour"); + assertEquals(expectedDimensionFields, dateDim.getSubDimensionNames()); + List expectedTimeUnits = Arrays.asList( + new DateTimeUnitAdapter(Rounding.DateTimeUnit.MINUTES_OF_HOUR), + DataCubeDateTimeUnit.HALF_HOUR_OF_DAY + ); + for (int i = 0; i < expectedTimeUnits.size(); i++) { + assertEquals(expectedTimeUnits.get(i).shortName(), dateDim.getSortedCalendarIntervals().get(i).shortName()); + } + assertEquals("nested.status", starTreeFieldType.getDimensions().get(1).getField()); + assertEquals("nested.status", starTreeFieldType.getMetrics().get(0).getField()); + List expectedMetrics = Arrays.asList(MetricStat.VALUE_COUNT, MetricStat.SUM, MetricStat.AVG); + assertEquals(expectedMetrics, starTreeFieldType.getMetrics().get(0).getMetrics()); + assertEquals(10000, starTreeFieldType.getStarTreeConfig().maxLeafDocs()); + assertEquals(StarTreeFieldConfiguration.StarTreeBuildMode.OFF_HEAP, starTreeFieldType.getStarTreeConfig().getBuildMode()); + assertEquals(Collections.emptySet(), starTreeFieldType.getStarTreeConfig().getSkipStarNodeCreationInDims()); + } + } + public void testInValidStarTreeMinDims() throws IOException { MapperParsingException ex = expectThrows( MapperParsingException.class, @@ -1047,6 +1074,56 @@ private XContentBuilder getMinMappingWith2StarTrees() throws IOException { }); } + private XContentBuilder getMinMappingWithNestedField() throws IOException { + return topMapping(b -> { + b.startObject("composite"); + b.startObject("startree"); + b.field("type", "star_tree"); + b.startObject("config"); + + b.startArray("ordered_dimensions"); + b.startObject(); + b.field("name", "@timestamp"); + b.endObject(); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.endArray(); + + b.startArray("metrics"); + b.startObject(); + b.field("name", "nested.status"); + b.endObject(); + b.startObject(); + b.field("name", "metric_field"); + b.endObject(); + b.endArray(); + + b.endObject(); + b.endObject(); + + b.endObject(); + b.startObject("properties"); + b.startObject("@timestamp"); + b.field("type", "date"); + b.endObject(); + b.startObject("nested"); + b.startObject("properties"); + b.startObject("status"); + b.field("type", "integer"); + b.endObject(); + b.endObject(); + b.endObject(); + b.startObject("metric_field"); + b.field("type", "integer"); + b.endObject(); + b.startObject("keyword1"); + b.field("type", "keyword"); + b.endObject(); + b.endObject(); + }); + } + private XContentBuilder getInvalidMapping( boolean singleDim, boolean invalidSkipDims, From 4a53ff24adbec1d5aeb3d73548171870a3de925d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 09:57:50 -0500 Subject: [PATCH 06/37] Bump ch.qos.logback:logback-core from 1.5.12 to 1.5.16 in /test/fixtures/hdfs-fixture (#16951) * Bump ch.qos.logback:logback-core in /test/fixtures/hdfs-fixture Bumps [ch.qos.logback:logback-core](https://github.com/qos-ch/logback) from 1.5.12 to 1.5.16. 
- [Commits](https://github.com/qos-ch/logback/compare/v_1.5.12...v_1.5.16) --- updated-dependencies: - dependency-name: ch.qos.logback:logback-core dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0efb53beb6e31..82bf9dd0fea0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,6 +57,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896)) - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918)) - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) +- Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 02aab575bbaf0..bb2b7ebafdf81 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'org.apache.zookeeper:zookeeper:3.9.3' api "org.apache.commons:commons-text:1.13.0" api "commons-net:commons-net:3.11.1" - api "ch.qos.logback:logback-core:1.5.12" + api "ch.qos.logback:logback-core:1.5.16" api "ch.qos.logback:logback-classic:1.5.15" api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.28.0' From e73ffdf1d1a11587f2d25ba69ddb46fc25994919 Mon Sep 17 00:00:00 2001 From: Ruirui Zhang Date: Mon, 6 Jan 2025 10:36:36 -0800 Subject: [PATCH 07/37] [Workload Management] Add Workload Management IT (#16359) * add workload management IT Signed-off-by: Ruirui Zhang * address comments Signed-off-by: Ruirui Zhang --------- Signed-off-by: Ruirui Zhang --- CHANGELOG.md | 1 + .../backpressure/SearchBackpressureIT.java | 10 +- .../opensearch/wlm/WorkloadManagementIT.java | 434 ++++++++++++++++++ 3 files changed, 442 insertions(+), 3 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 82bf9dd0fea0a..99bfecfc0eac6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Support for keyword fields in star-tree index ([#16233](https://github.com/opensearch-project/OpenSearch/pull/16233)) - Add a flag in QueryShardContext to differentiate inner hit query ([#16600](https://github.com/opensearch-project/OpenSearch/pull/16600)) - Add vertical scaling and SoftReference for snapshot repository data cache ([#16489](https://github.com/opensearch-project/OpenSearch/pull/16489)) +- [Workload Management] Add Workload Management IT ([#16359](https://github.com/opensearch-project/OpenSearch/pull/16359)) - Support prefix list for remote repository 
attributes([#16271](https://github.com/opensearch-project/OpenSearch/pull/16271)) - Add new configuration setting `synonym_analyzer`, to the `synonym` and `synonym_graph` filters, enabling the specification of a custom analyzer for reading the synonym file ([#16488](https://github.com/opensearch-project/OpenSearch/pull/16488)). - Add stats for remote publication failure and move download failure stats to remote methods([#16682](https://github.com/opensearch-project/OpenSearch/pull/16682/)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java index 40c9301ef4bce..d200b9177353a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/backpressure/SearchBackpressureIT.java @@ -314,7 +314,7 @@ public void testSearchCancellationWithBackpressureDisabled() throws InterruptedE assertNull("SearchShardTask shouldn't have cancelled for monitor_only mode", caughtException); } - private static class ExceptionCatchingListener implements ActionListener { + public static class ExceptionCatchingListener implements ActionListener { private final CountDownLatch latch; private Exception exception = null; @@ -333,7 +333,11 @@ public void onFailure(Exception e) { latch.countDown(); } - private Exception getException() { + public CountDownLatch getLatch() { + return latch; + } + + public Exception getException() { return exception; } } @@ -349,7 +353,7 @@ private Supplier descriptionSupplier(String description) { return () -> description; } - interface TaskFactory { + public interface TaskFactory { T createTask(long id, String type, String action, String description, TaskId parentTaskId, Map headers); } diff --git a/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java new file mode 100644 index 0000000000000..6b68a83da94e2 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/wlm/WorkloadManagementIT.java @@ -0,0 +1,434 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.wlm; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.ActionType; +import org.opensearch.action.search.SearchTask; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.ClusterStateUpdateTask; +import org.opensearch.cluster.block.ClusterBlockException; +import org.opensearch.cluster.block.ClusterBlockLevel; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.QueryGroup; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.action.ActionResponse; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.tasks.TaskCancelledException; +import org.opensearch.core.tasks.TaskId; +import org.opensearch.plugins.ActionPlugin; +import org.opensearch.plugins.Plugin; +import org.opensearch.search.backpressure.SearchBackpressureIT.ExceptionCatchingListener; +import org.opensearch.search.backpressure.SearchBackpressureIT.TaskFactory; +import org.opensearch.search.backpressure.SearchBackpressureIT.TestResponse; +import org.opensearch.tasks.CancellableTask; +import org.opensearch.tasks.Task; +import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.opensearch.search.SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.threadpool.ThreadPool.Names.SAME; +import static org.opensearch.wlm.QueryGroupTask.QUERY_GROUP_ID_HEADER; +import static org.hamcrest.Matchers.instanceOf; + +public class WorkloadManagementIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase { + final static String PUT = "PUT"; + final static String MEMORY = "MEMORY"; + final static String CPU = "CPU"; + final static String ENABLED = "enabled"; + final static String DELETE = "DELETE"; + private static final TimeValue TIMEOUT = new TimeValue(1, TimeUnit.SECONDS); + + public WorkloadManagementIT(Settings nodeSettings) { + super(nodeSettings); + } + + @ParametersFactory + public static Collection parameters() { + return Arrays.asList( + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), false).build() }, + new Object[] { Settings.builder().put(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), true).build() } + ); + } + + @Override + protected Collection> 
nodePlugins() { + final List> plugins = new ArrayList<>(super.nodePlugins()); + plugins.add(TestClusterUpdatePlugin.class); + return plugins; + } + + @Before + public final void setupNodeSettings() { + Settings request = Settings.builder() + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_MEMORY_CANCELLATION_THRESHOLD.getKey(), 0.9) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_REJECTION_THRESHOLD.getKey(), 0.8) + .put(WorkloadManagementSettings.NODE_LEVEL_CPU_CANCELLATION_THRESHOLD.getKey(), 0.9) + .build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + } + + @After + public final void cleanupNodeSettings() { + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + } + + public void testHighCPUInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighCPUInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.01, ResourceType.MEMORY, 0.01) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInEnforcedMode() throws InterruptedException { + Settings request = Settings.builder().put(WorkloadManagementSettings.WLM_MODE_SETTING.getKey(), ENABLED).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(request).get()); + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, queryGroup.get_id()); + assertNotNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + MatcherAssert.assertThat(caughtException, instanceOf(TaskCancelledException.class)); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testHighMemoryInMonitorMode() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment(MutableQueryGroupFragment.ResiliencyMode.ENFORCED, Map.of(ResourceType.MEMORY, 0.01)) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(MEMORY, 
queryGroup.get_id()); + assertNull("SearchTask should have been cancelled with TaskCancelledException", caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public void testNoCancellation() throws InterruptedException { + QueryGroup queryGroup = new QueryGroup( + "name", + new MutableQueryGroupFragment( + MutableQueryGroupFragment.ResiliencyMode.ENFORCED, + Map.of(ResourceType.CPU, 0.8, ResourceType.MEMORY, 0.8) + ) + ); + updateQueryGroupInClusterState(PUT, queryGroup); + Exception caughtException = executeQueryGroupTask(CPU, queryGroup.get_id()); + assertNull(caughtException); + updateQueryGroupInClusterState(DELETE, queryGroup); + } + + public Exception executeQueryGroupTask(String resourceType, String queryGroupId) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute( + TestQueryGroupTaskTransportAction.ACTION, + new TestQueryGroupTaskRequest( + resourceType, + queryGroupId, + (TaskFactory) (id, type, action, description, parentTaskId, headers) -> new SearchTask( + id, + type, + action, + () -> description, + parentTaskId, + headers + ) + ), + listener + ); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds() + 1, TimeUnit.SECONDS)); + return listener.getException(); + } + + public void updateQueryGroupInClusterState(String method, QueryGroup queryGroup) throws InterruptedException { + ExceptionCatchingListener listener = new ExceptionCatchingListener(); + client().execute(TestClusterUpdateTransportAction.ACTION, new TestClusterUpdateRequest(queryGroup, method), listener); + assertTrue(listener.getLatch().await(TIMEOUT.getSeconds(), TimeUnit.SECONDS)); + assertEquals(0, listener.getLatch().getCount()); + } + + public static class TestClusterUpdateRequest extends ClusterManagerNodeRequest { + final private String method; + final private QueryGroup queryGroup; + + public TestClusterUpdateRequest(QueryGroup queryGroup, String method) { + this.method = method; + this.queryGroup = queryGroup; + } + + public TestClusterUpdateRequest(StreamInput in) throws IOException { + super(in); + this.method = in.readString(); + this.queryGroup = new QueryGroup(in); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(method); + queryGroup.writeTo(out); + } + + public QueryGroup getQueryGroup() { + return queryGroup; + } + + public String getMethod() { + return method; + } + } + + public static class TestClusterUpdateTransportAction extends TransportClusterManagerNodeAction { + public static final ActionType ACTION = new ActionType<>("internal::test_cluster_update_action", TestResponse::new); + + @Inject + public TestClusterUpdateTransportAction( + ThreadPool threadPool, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + ClusterService clusterService + ) { + super( + ACTION.name(), + transportService, + clusterService, + threadPool, + actionFilters, + TestClusterUpdateRequest::new, + indexNameExpressionResolver + ); + } + + @Override + protected String executor() { + return SAME; + } + + @Override + protected TestResponse read(StreamInput in) throws IOException { + return new TestResponse(in); + } + + @Override + protected ClusterBlockException checkBlock(TestClusterUpdateRequest request, ClusterState state) { + return 
state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected void clusterManagerOperation( + TestClusterUpdateRequest request, + ClusterState clusterState, + ActionListener listener + ) { + clusterService.submitStateUpdateTask("query-group-persistence-service", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + Map currentGroups = currentState.metadata().queryGroups(); + QueryGroup queryGroup = request.getQueryGroup(); + String id = queryGroup.get_id(); + String method = request.getMethod(); + Metadata metadata; + if (method.equals(PUT)) { // create + metadata = Metadata.builder(currentState.metadata()).put(queryGroup).build(); + } else { // delete + metadata = Metadata.builder(currentState.metadata()).remove(currentGroups.get(id)).build(); + } + return ClusterState.builder(currentState).metadata(metadata).build(); + } + + @Override + public void onFailure(String source, Exception e) { + listener.onFailure(e); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + listener.onResponse(new TestResponse()); + } + }); + } + } + + public static class TestQueryGroupTaskRequest extends ActionRequest { + private final String type; + private final String queryGroupId; + private TaskFactory taskFactory; + + public TestQueryGroupTaskRequest(String type, String queryGroupId, TaskFactory taskFactory) { + this.type = type; + this.queryGroupId = queryGroupId; + this.taskFactory = taskFactory; + } + + public TestQueryGroupTaskRequest(StreamInput in) throws IOException { + super(in); + this.type = in.readString(); + this.queryGroupId = in.readString(); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return taskFactory.createTask(id, type, action, "", parentTaskId, headers); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(type); + out.writeString(queryGroupId); + } + + public String getType() { + return type; + } + + public String getQueryGroupId() { + return queryGroupId; + } + } + + public static class TestQueryGroupTaskTransportAction extends HandledTransportAction { + public static final ActionType ACTION = new ActionType<>("internal::test_query_group_task_action", TestResponse::new); + private final ThreadPool threadPool; + + @Inject + public TestQueryGroupTaskTransportAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters) { + super(ACTION.name(), transportService, actionFilters, TestQueryGroupTaskRequest::new); + this.threadPool = threadPool; + } + + @Override + protected void doExecute(Task task, TestQueryGroupTaskRequest request, ActionListener listener) { + threadPool.getThreadContext().putHeader(QUERY_GROUP_ID_HEADER, request.getQueryGroupId()); + threadPool.executor(ThreadPool.Names.SEARCH).execute(() -> { + try { + CancellableTask cancellableTask = (CancellableTask) task; + ((QueryGroupTask) task).setQueryGroupId(threadPool.getThreadContext()); + assertEquals(request.getQueryGroupId(), ((QueryGroupTask) task).getQueryGroupId()); + long startTime = System.nanoTime(); + while (System.nanoTime() - startTime < TIMEOUT.getNanos()) { + doWork(request); + if (cancellableTask.isCancelled()) { + break; + } + } + if (cancellableTask.isCancelled()) { + throw new 
TaskCancelledException(cancellableTask.getReasonCancelled()); + } else { + listener.onResponse(new TestResponse()); + } + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void doWork(TestQueryGroupTaskRequest request) throws InterruptedException { + switch (request.getType()) { + case "CPU": + long i = 0, j = 1, k = 1, iterations = 1000; + do { + j += i; + k *= j; + i++; + } while (i < iterations); + break; + case "MEMORY": + int bytesToAllocate = (int) (Runtime.getRuntime().totalMemory() * 0.01); + Byte[] bytes = new Byte[bytesToAllocate]; + int[] ints = new int[bytesToAllocate]; + break; + } + } + } + + public static class TestClusterUpdatePlugin extends Plugin implements ActionPlugin { + @Override + public List> getActions() { + return Arrays.asList( + new ActionHandler<>(TestClusterUpdateTransportAction.ACTION, TestClusterUpdateTransportAction.class), + new ActionHandler<>(TestQueryGroupTaskTransportAction.ACTION, TestQueryGroupTaskTransportAction.class) + ); + } + + @Override + public List> getClientActions() { + return Arrays.asList(TestClusterUpdateTransportAction.ACTION, TestQueryGroupTaskTransportAction.ACTION); + } + } +} From aca373b6b9d4f1afa9507874bdf64f8f9924f9fb Mon Sep 17 00:00:00 2001 From: Rishabh Singh Date: Mon, 6 Jan 2025 11:27:08 -0800 Subject: [PATCH 08/37] Add new benchmark config for nested workload (#16956) Signed-off-by: Rishabh Singh --- .github/benchmark-configs.json | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index b3590f8a2f942..1c80f5048a611 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -256,5 +256,21 @@ "data_instance_config": "4vCPU, 32G Mem, 16G Heap" }, "baseline_cluster_config": "x64-r5.xlarge-1-shard-0-replica-snapshot-baseline" + }, + "id_16": { + "description": "Benchmarking config for NESTED workload, benchmarks nested queries with inner-hits", + "supported_major_versions": ["2", "3"], + "cluster-benchmark-configs": { + "SINGLE_NODE_CLUSTER": "true", + "MIN_DISTRIBUTION": "true", + "TEST_WORKLOAD": "nested", + "WORKLOAD_PARAMS": "{\"number_of_replicas\":\"0\",\"number_of_shards\":\"1\"}", + "CAPTURE_NODE_STAT": "true" + }, + "cluster_configuration": { + "size": "Single-Node", + "data_instance_config": "4vCPU, 32G Mem, 16G Heap" + }, + "baseline_cluster_config": "x64-r5.xlarge-single-node-1-shard-0-replica-baseline" + } } -} From dd9695362a9d6db1c3ee2117c269f025155d4957 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:52:48 -0500 Subject: [PATCH 09/37] Bump com.azure:azure-core-http-netty from 1.15.5 to 1.15.7 in /plugins/repository-azure (#16952) * Bump com.azure:azure-core-http-netty in /plugins/repository-azure Bumps [com.azure:azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.15.5 to 1.15.7. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.15.5...azure-core-http-netty_1.15.7) --- updated-dependencies: - dependency-name: com.azure:azure-core-http-netty dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 1 + plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-core-http-netty-1.15.5.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.15.7.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 99bfecfc0eac6..bcf1904db8d27 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918)) - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) - Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951)) +- Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 03ea07623dbaf..ad12ec9003e64 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -48,7 +48,7 @@ dependencies { api 'com.azure:azure-json:1.3.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.28.0' - api 'com.azure:azure-core-http-netty:1.15.5' + api 'com.azure:azure-core-http-netty:1.15.7' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 deleted file mode 100644 index 2f5239cc26148..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -44d99705d3759e2ad7ee8110f811d4ed304a6a7c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 new file mode 100644 index 0000000000000..d72f835c69903 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.7.jar.sha1 @@ -0,0 +1 @@ +a83247eeeb7f63f891e725228d54c3c24132c66a \ No newline at end of file From 0b365998ed6e4f537dbdf7983a077bc53e785bb9 Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Mon, 6 Jan 2025 16:23:59 -0800 Subject: [PATCH 10/37] Always use constant_score query for match_only_text (#16964) In some cases, when we create a term query over a `match_only_text` field, it may still try to compute scores, which prevents early termination. We should *always* use a constant score query when querying `match_only_text`, since we don't have the statistics required to compute scores. 
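For illustration, a minimal Java sketch of the wrapping this patch applies (the example class name and the standalone main method are assumptions made for the sketch; Term, TermQuery and ConstantScoreQuery are the Lucene classes used in the diff below):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

// Hypothetical example class: mirrors the override added to MatchOnlyTextFieldMapper,
// where the scoring TermQuery built by the parent field type is wrapped so every hit
// receives a constant score and no term statistics are consulted.
public final class ConstantScoreTermQueryExample {
    static Query termQuery(String field, String value) {
        // match_only_text stores no frequencies or norms to score with, so wrap in a constant-score query
        return new ConstantScoreQuery(new TermQuery(new Term(field, value)));
    }

    public static void main(String[] args) {
        // Prints something like: ConstantScore(field:foo)
        System.out.println(termQuery("field", "foo"));
    }
}

Because the wrapped query never computes scores, collectors that can stop once enough hits are gathered are free to terminate early, which is the behavior this change restores.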
--------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../mapper/MatchOnlyTextFieldMapper.java | 11 +++++++++ .../mapper/MatchOnlyTextFieldMapperTests.java | 23 ++++++++++++++++++- .../mapper/MatchOnlyTextFieldTypeTests.java | 18 +++++++++++++++ 4 files changed, 52 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index bcf1904db8d27..1b49368a20fa8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763)) - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) +- Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) ### Security diff --git a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java index fb97f8c309a70..757de65248d33 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapper.java @@ -16,6 +16,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; @@ -290,6 +291,16 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions, return new SourceFieldMatchQuery(builder.build(), phrasePrefixQuery, this, context); } + @Override + public Query termQuery(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQuery(value, context)); + } + + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return new ConstantScoreQuery(super.termQueryCaseInsensitive(value, context)); + } + private List> getTermsFromTokenStream(TokenStream stream) throws IOException { final List> termArray = new ArrayList<>(); TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java index 580f8cccc9af5..d9f0fd6657085 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldMapperTests.java @@ -15,11 +15,13 @@ import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.MultiPhraseQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.tests.analysis.MockSynonymAnalyzer; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.lucene.search.MultiPhrasePrefixQuery; import 
org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.MediaTypeRegistry; @@ -28,6 +30,7 @@ import org.opensearch.index.query.MatchPhraseQueryBuilder; import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.SourceFieldMatchQuery; +import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.index.search.MatchQuery; import org.junit.Before; @@ -391,7 +394,7 @@ public void testPhraseQuery() throws IOException { assertThat(q, is(expectedQuery)); Query q4 = new MatchPhraseQueryBuilder("field", "singleton").toQuery(queryShardContext); - assertThat(q4, is(new TermQuery(new Term("field", "singleton")))); + assertThat(q4, is(new ConstantScoreQuery(new TermQuery(new Term("field", "singleton"))))); Query q2 = new MatchPhraseQueryBuilder("field", "three words here").toQuery(queryShardContext); expectedQuery = new SourceFieldMatchQuery( @@ -447,4 +450,22 @@ public void testPhraseQuery() throws IOException { ); assertThat(q6, is(expectedQuery)); } + + public void testTermQuery() throws Exception { + MapperService mapperService = createMapperService(mapping(b -> { + b.startObject("field"); + { + b.field("type", textFieldName); + b.field("analyzer", "my_stop_analyzer"); // "standard" will be replaced with MockSynonymAnalyzer + } + b.endObject(); + })); + QueryShardContext queryShardContext = createQueryShardContext(mapperService); + + Query q = new TermQueryBuilder("field", "foo").rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), q); + + q = new TermQueryBuilder("field", "foo").caseInsensitive(true).rewrite(queryShardContext).toQuery(queryShardContext); + assertEquals(new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "foo"))), q); + } } diff --git a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java index 51234fa04ddc2..0170cdde8b21c 100644 --- a/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/MatchOnlyTextFieldTypeTests.java @@ -8,7 +8,11 @@ package org.opensearch.index.mapper; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.TermQuery; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; public class MatchOnlyTextFieldTypeTests extends TextFieldTypeTests { @@ -28,4 +32,18 @@ TextFieldMapper.TextFieldType createFieldType(boolean searchable) { ParametrizedFieldMapper.Parameter.metaParam().get() ); } + + @Override + public void testTermQuery() { + MappedFieldType ft = createFieldType(true); + assertEquals(new ConstantScoreQuery(new TermQuery(new Term("field", "foo"))), ft.termQuery("foo", null)); + assertEquals( + new ConstantScoreQuery(AutomatonQueries.caseInsensitiveTermQuery(new Term("field", "fOo"))), + ft.termQueryCaseInsensitive("fOo", null) + ); + + MappedFieldType unsearchable = createFieldType(false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } } From e7e19f712596ca0ca0531ff5c39663cc472fc95f Mon Sep 17 00:00:00 2001 From: expani1729 <110471048+expani@users.noreply.github.com> Date: Mon, 6 Jan 2025 16:53:05 -0800 Subject: [PATCH 
11/37] Changes to support unmapped fields in metric aggregation (#16481) Avoids an exception when querying an unmapped field when the star tree experimental feature is enabled. --------- Signed-off-by: expani --- .../startree/utils/StarTreeQueryHelper.java | 2 +- .../ValuesSourceAggregatorFactory.java | 2 +- .../startree/MetricAggregatorTests.java | 139 ++++++++++++++++++ .../startree/StarTreeFilterTests.java | 13 +- .../aggregations/AggregatorTestCase.java | 22 ++- 5 files changed, 172 insertions(+), 6 deletions(-) diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java index e538be5d5bece..e46cf6f56b36e 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/StarTreeQueryHelper.java @@ -152,7 +152,7 @@ private static MetricStat validateStarTreeMetricSupport( MetricStat metricStat = ((MetricAggregatorFactory) aggregatorFactory).getMetricStat(); field = ((MetricAggregatorFactory) aggregatorFactory).getField(); - if (supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { + if (field != null && supportedMetrics.containsKey(field) && supportedMetrics.get(field).contains(metricStat)) { return metricStat; } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d862b2c2784de..41344fd06cbbc 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -104,6 +104,6 @@ public String getStatsSubtype() { } public String getField() { - return config.fieldContext().field(); + return config.fieldContext() != null ? 
config.fieldContext().field() : null; } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java index 12e83cbbadd5d..05f48eb9243af 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/MetricAggregatorTests.java @@ -28,18 +28,27 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.MockBigArrays; +import org.opensearch.common.util.MockPageCacheRecycler; +import org.opensearch.core.indices.breaker.CircuitBreakerService; +import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.codec.composite.CompositeIndexReader; import org.opensearch.index.codec.composite.composite912.Composite912Codec; import org.opensearch.index.codec.composite912.datacube.startree.StarTreeDocValuesFormatTests; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; +import org.opensearch.index.compositeindex.datacube.MetricStat; import org.opensearch.index.compositeindex.datacube.NumericDimension; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.NumberFieldMapper; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryShardContext; import org.opensearch.index.query.TermQueryBuilder; import org.opensearch.search.aggregations.AggregationBuilder; +import org.opensearch.search.aggregations.AggregatorFactories; +import org.opensearch.search.aggregations.AggregatorFactory; import org.opensearch.search.aggregations.AggregatorTestCase; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -49,14 +58,17 @@ import org.opensearch.search.aggregations.metrics.InternalSum; import org.opensearch.search.aggregations.metrics.InternalValueCount; import org.opensearch.search.aggregations.metrics.MaxAggregationBuilder; +import org.opensearch.search.aggregations.metrics.MetricAggregatorFactory; import org.opensearch.search.aggregations.metrics.MinAggregationBuilder; import org.opensearch.search.aggregations.metrics.SumAggregationBuilder; import org.opensearch.search.aggregations.metrics.ValueCountAggregationBuilder; +import org.opensearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; import java.util.Random; @@ -69,6 +81,8 @@ import static org.opensearch.search.aggregations.AggregationBuilders.min; import static org.opensearch.search.aggregations.AggregationBuilders.sum; import static org.opensearch.test.InternalAggregationTestCase.DEFAULT_MAX_BUCKETS; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class MetricAggregatorTests extends AggregatorTestCase { @@ -267,6 +281,110 @@ public void testStarTreeDocValues() throws IOException { ); } + CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + + QueryShardContext 
queryShardContext = queryShardContextMock( + indexSearcher, + mapperServiceMock(), + createIndexSettings(), + circuitBreakerService, + new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService).withCircuitBreaking() + ); + + MetricAggregatorFactory aggregatorFactory = mock(MetricAggregatorFactory.class); + when(aggregatorFactory.getSubFactories()).thenReturn(AggregatorFactories.EMPTY); + when(aggregatorFactory.getField()).thenReturn(FIELD_NAME); + when(aggregatorFactory.getMetricStat()).thenReturn(MetricStat.SUM); + + // Case when field and metric type in aggregation are fully supported by star tree. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.SUM, MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + true + ); + + // Case when the field is not supported by star tree + SumAggregationBuilder invalidFieldSumAggBuilder = sum("_name").field("hello"); + testCase( + indexSearcher, + query, + queryBuilder, + invalidFieldSumAggBuilder, + starTree, + supportedDimensions, + Collections.emptyList(), + verifyAggregation(InternalSum::getValue), + invalidFieldSumAggBuilder.build(queryShardContext, null), + false // Invalid fields will return null StarTreeQueryContext which will not cause early termination by leaf collector + ); + + // Case when metric type in aggregation is not supported by star tree but the field is supported. + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric(FIELD_NAME, List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when field is not present in supported metrics + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { mock(MetricAggregatorFactory.class) }); + when(aggregatorFactory.getSubFactories()).thenReturn(aggregatorFactories); + + // Case when sub aggregations are present + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + aggregatorFactory, + false + ); + + // Case when aggregation factory is not metric aggregation + testCase( + indexSearcher, + query, + queryBuilder, + sumAggregationBuilder, + starTree, + supportedDimensions, + List.of(new Metric("hello", List.of(MetricStat.MAX, MetricStat.MIN, MetricStat.AVG))), + verifyAggregation(InternalSum::getValue), + mock(ValuesSourceAggregatorFactory.class), + false + ); + ir.close(); directory.close(); } @@ -287,6 +405,21 @@ private void testC CompositeIndexFieldInfo starTree, List supportedDimensions, BiConsumer verify + ) throws IOException { + testCase(searcher, query, queryBuilder, aggBuilder, starTree, supportedDimensions, Collections.emptyList(), verify, null, true); + } + + private void testCase( + IndexSearcher searcher, + Query query, + QueryBuilder queryBuilder, + 
T aggBuilder, + CompositeIndexFieldInfo starTree, + List supportedDimensions, + List supportedMetrics, + BiConsumer verify, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination ) throws IOException { V starTreeAggregation = searchAndReduceStarTree( createIndexSettings(), @@ -296,8 +429,11 @@ private void testC aggBuilder, starTree, supportedDimensions, + supportedMetrics, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); V expectedAggregation = searchAndReduceStarTree( @@ -308,8 +444,11 @@ private void testC aggBuilder, null, null, + null, DEFAULT_MAX_BUCKETS, false, + aggregatorFactory, + assertCollectorEarlyTermination, DEFAULT_MAPPED_FIELD ); verify.accept(expectedAggregation, starTreeAggregation); diff --git a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java index b03cb5ac7bb9d..c1cb19b9576e4 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/startree/StarTreeFilterTests.java @@ -87,7 +87,8 @@ public void testStarTreeFilterWithDocsInSVDFieldButNoStarNode() throws IOExcepti testStarTreeFilter(10, false); } - private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + private Directory createStarTreeIndex(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension, List docs) + throws IOException { Directory directory = newDirectory(); IndexWriterConfig conf = newIndexWriterConfig(null); conf.setCodec(getCodec(maxLeafDoc, skipStarNodeCreationForSDVDimension)); @@ -95,7 +96,6 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS RandomIndexWriter iw = new RandomIndexWriter(random(), directory, conf); int totalDocs = 100; - List docs = new ArrayList<>(); for (int i = 0; i < totalDocs; i++) { Document doc = new Document(); doc.add(new SortedNumericDocValuesField(SNDV, i)); @@ -110,6 +110,15 @@ private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForS } iw.forceMerge(1); iw.close(); + return directory; + } + + private void testStarTreeFilter(int maxLeafDoc, boolean skipStarNodeCreationForSDVDimension) throws IOException { + List docs = new ArrayList<>(); + + Directory directory = createStarTreeIndex(maxLeafDoc, skipStarNodeCreationForSDVDimension, docs); + + int totalDocs = docs.size(); DirectoryReader ir = DirectoryReader.open(directory); initValuesSourceRegistry(); diff --git a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java index e1728c4476699..27142b298db52 100644 --- a/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/search/aggregations/AggregatorTestCase.java @@ -93,6 +93,7 @@ import org.opensearch.index.cache.query.DisabledQueryCache; import org.opensearch.index.codec.composite.CompositeIndexFieldInfo; import org.opensearch.index.compositeindex.datacube.Dimension; +import org.opensearch.index.compositeindex.datacube.Metric; import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeQueryHelper; import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexFieldDataCache; @@ -348,7 
+349,9 @@ protected CountingAggregator createCountingAggregator( IndexSettings indexSettings, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext; @@ -360,7 +363,9 @@ protected CountingAggregator createCountingAggregator( queryBuilder, starTree, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); } else { @@ -389,7 +394,9 @@ protected SearchContext createSearchContextWithStarTreeContext( QueryBuilder queryBuilder, CompositeIndexFieldInfo starTree, List supportedDimensions, + List supportedMetrics, MultiBucketConsumer bucketConsumer, + AggregatorFactory aggregatorFactory, MappedFieldType... fieldTypes ) throws IOException { SearchContext searchContext = createSearchContext( @@ -406,7 +413,12 @@ protected SearchContext createSearchContextWithStarTreeContext( AggregatorFactories aggregatorFactories = mock(AggregatorFactories.class); when(searchContext.aggregations()).thenReturn(searchContextAggregations); when(searchContextAggregations.factories()).thenReturn(aggregatorFactories); - when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + + if (aggregatorFactory != null) { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] { aggregatorFactory }); + } else { + when(aggregatorFactories.getFactories()).thenReturn(new AggregatorFactory[] {}); + } CompositeDataCubeFieldType compositeMappedFieldType = mock(CompositeDataCubeFieldType.class); when(compositeMappedFieldType.name()).thenReturn(starTree.getField()); @@ -414,6 +426,7 @@ protected SearchContext createSearchContextWithStarTreeContext( Set compositeFieldTypes = Set.of(compositeMappedFieldType); when((compositeMappedFieldType).getDimensions()).thenReturn(supportedDimensions); + when((compositeMappedFieldType).getMetrics()).thenReturn(supportedMetrics); MapperService mapperService = mock(MapperService.class); when(mapperService.getCompositeFieldTypes()).thenReturn(compositeFieldTypes); when(searchContext.mapperService()).thenReturn(mapperService); @@ -740,8 +753,11 @@ protected A searchAndReduc AggregationBuilder builder, CompositeIndexFieldInfo compositeIndexFieldInfo, List supportedDimensions, + List supportedMetrics, int maxBucket, boolean hasNested, + AggregatorFactory aggregatorFactory, + boolean assertCollectorEarlyTermination, MappedFieldType... 
fieldTypes ) throws IOException { query = query.rewrite(searcher); @@ -764,7 +780,9 @@ protected A searchAndReduc indexSettings, compositeIndexFieldInfo, supportedDimensions, + supportedMetrics, bucketConsumer, + aggregatorFactory, fieldTypes ); @@ -772,7 +790,7 @@ protected A searchAndReduc searcher.search(query, countingAggregator); countingAggregator.postCollection(); aggs.add(countingAggregator.buildTopLevel()); - if (compositeIndexFieldInfo != null) { + if (compositeIndexFieldInfo != null && assertCollectorEarlyTermination) { assertEquals(0, countingAggregator.collectCounter.get()); } From 1d4b85f5ff8c4e314ecf49190b68eb995bf571d8 Mon Sep 17 00:00:00 2001 From: Ashish Singh Date: Thu, 9 Jan 2025 10:22:24 +0530 Subject: [PATCH 12/37] Use async client for delete blob or path in S3 Blob Container (#16788) * Use async client for delete blob or path in S3 Blob Container Signed-off-by: Ashish Singh * Fix UTs Signed-off-by: Ashish Singh * Fix failures in S3BlobStoreRepositoryTests Signed-off-by: Ashish Singh * Fix S3BlobStoreRepositoryTests Signed-off-by: Ashish Singh * Fix failures in S3RepositoryThirdPartyTests Signed-off-by: Ashish Singh * Fix failures in S3RepositoryPluginTests Signed-off-by: Ashish Singh --------- Signed-off-by: Ashish Singh --- .../s3/S3BlobStoreRepositoryTests.java | 27 +- .../s3/S3RepositoryThirdPartyTests.java | 8 - .../repositories/s3/S3AsyncService.java | 30 +- .../repositories/s3/S3BlobContainer.java | 140 +------ .../repositories/s3/S3RepositoryPlugin.java | 19 +- .../s3/S3BlobStoreContainerTests.java | 347 +++++++++++++----- .../s3/S3RepositoryPluginTests.java | 11 +- .../common/settings/ClusterSettings.java | 1 - .../blobstore/BlobStoreRepository.java | 38 +- 9 files changed, 322 insertions(+), 299 deletions(-) diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index 944de326d144c..5bea51706cfae 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -59,6 +59,7 @@ import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.OpenSearchMockAPIBasedRepositoryIntegTestCase; +import org.opensearch.repositories.s3.async.AsyncTransferManager; import org.opensearch.repositories.s3.utils.AwsRequestSigner; import org.opensearch.snapshots.mockstore.BlobStoreWrapper; import org.opensearch.test.BackgroundIndexer; @@ -153,7 +154,6 @@ protected Settings nodeSettings(int nodeOrdinal) { // Disable request throttling because some random values in tests might generate too many failures for the S3 client .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(S3ClientSettings.PROXY_TYPE_SETTING.getConcreteSettingForNamespace("test").getKey(), ProxySettings.ProxyType.DIRECT) - .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) .put(super.nodeSettings(nodeOrdinal)) .setSecureSettings(secureSettings); @@ -253,22 +253,27 @@ protected S3Repository createRepository( ClusterService clusterService, RecoverySettings recoverySettings ) { - GenericStatsMetricPublisher genericStatsMetricPublisher = new 
GenericStatsMetricPublisher(10000L, 10, 10000L, 10); - + AsyncTransferManager asyncUploadUtils = new AsyncTransferManager( + S3Repository.PARALLEL_MULTIPART_UPLOAD_MINIMUM_PART_SIZE_SETTING.get(clusterService.getSettings()).getBytes(), + normalExecutorBuilder.getStreamReader(), + priorityExecutorBuilder.getStreamReader(), + urgentExecutorBuilder.getStreamReader(), + transferSemaphoresHolder + ); return new S3Repository( metadata, registry, service, clusterService, recoverySettings, - null, - null, - null, - null, - null, - false, - null, - null, + asyncUploadUtils, + urgentExecutorBuilder, + priorityExecutorBuilder, + normalExecutorBuilder, + s3AsyncService, + S3Repository.PARALLEL_MULTIPART_UPLOAD_ENABLED_SETTING.get(clusterService.getSettings()), + normalPrioritySizeBasedBlockingQ, + lowPrioritySizeBasedBlockingQ, genericStatsMetricPublisher ) { diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index f0e40db965646..7db9a0d3ba790 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -55,14 +55,6 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { - @Override - protected Settings nodeSettings() { - return Settings.builder() - .put(super.nodeSettings()) - .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) - .build(); - } - @Override @Before @SuppressForbidden(reason = "Need to set system property here for AWS SDK v2") diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java index 8bbef168de89c..7397c3132c17c 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3AsyncService.java @@ -25,7 +25,6 @@ import software.amazon.awssdk.http.nio.netty.NettyNioAsyncHttpClient; import software.amazon.awssdk.http.nio.netty.ProxyConfiguration; import software.amazon.awssdk.http.nio.netty.SdkEventLoopGroup; -import software.amazon.awssdk.profiles.ProfileFileSystemSetting; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3AsyncClient; import software.amazon.awssdk.services.s3.S3AsyncClientBuilder; @@ -120,6 +119,7 @@ public AmazonAsyncS3Reference client( if (existing != null && existing.tryIncRef()) { return existing; } + final AmazonAsyncS3Reference clientReference = new AmazonAsyncS3Reference( buildClient(clientSettings, urgentExecutorBuilder, priorityExecutorBuilder, normalExecutorBuilder) ); @@ -235,17 +235,17 @@ synchronized AmazonAsyncS3WithCredentials buildClient( } static ClientOverrideConfiguration buildOverrideConfiguration(final S3ClientSettings clientSettings) { + RetryPolicy retryPolicy = SocketAccess.doPrivileged( + () -> RetryPolicy.builder() + .numRetries(clientSettings.maxRetries) + .throttlingBackoffStrategy( + clientSettings.throttleRetries ? 
BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) : BackoffStrategy.none() + ) + .build() + ); + return ClientOverrideConfiguration.builder() - .retryPolicy( - RetryPolicy.builder() - .numRetries(clientSettings.maxRetries) - .throttlingBackoffStrategy( - clientSettings.throttleRetries - ? BackoffStrategy.defaultThrottlingStrategy(RetryMode.STANDARD) - : BackoffStrategy.none() - ) - .build() - ) + .retryPolicy(retryPolicy) .apiCallAttemptTimeout(Duration.ofMillis(clientSettings.requestTimeoutMillis)) .build(); } @@ -346,12 +346,7 @@ static AwsCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c // valid paths. @SuppressForbidden(reason = "Need to provide this override to v2 SDK so that path does not default to home path") private static void setDefaultAwsProfilePath() { - if (ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_SHARED_CREDENTIALS_FILE.property(), System.getProperty("opensearch.path.conf")); - } - if (ProfileFileSystemSetting.AWS_CONFIG_FILE.getStringValue().isEmpty()) { - System.setProperty(ProfileFileSystemSetting.AWS_CONFIG_FILE.property(), System.getProperty("opensearch.path.conf")); - } + S3Service.setDefaultAwsProfilePath(); } private static IrsaCredentials buildFromEnvironment(IrsaCredentials defaults) { @@ -443,5 +438,6 @@ public AwsCredentials resolveCredentials() { @Override public void close() { releaseCachedClients(); + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index 1a402e8431e25..8690a5c91680a 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -43,9 +43,6 @@ import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload; import software.amazon.awssdk.services.s3.model.CompletedPart; import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest; -import software.amazon.awssdk.services.s3.model.Delete; -import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; -import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; import software.amazon.awssdk.services.s3.model.GetObjectRequest; @@ -55,9 +52,7 @@ import software.amazon.awssdk.services.s3.model.ListObjectsV2Response; import software.amazon.awssdk.services.s3.model.NoSuchKeyException; import software.amazon.awssdk.services.s3.model.ObjectAttributes; -import software.amazon.awssdk.services.s3.model.ObjectIdentifier; import software.amazon.awssdk.services.s3.model.PutObjectRequest; -import software.amazon.awssdk.services.s3.model.S3Error; import software.amazon.awssdk.services.s3.model.ServerSideEncryption; import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; @@ -68,7 +63,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import 
org.opensearch.common.StreamContext; @@ -101,11 +96,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; @@ -381,125 +373,17 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS } @Override - public DeleteResult delete() throws IOException { - final AtomicLong deletedBlobs = new AtomicLong(); - final AtomicLong deletedBytes = new AtomicLong(); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - ListObjectsV2Iterable listObjectsIterable = SocketAccess.doPrivileged( - () -> clientReference.get() - .listObjectsV2Paginator( - ListObjectsV2Request.builder() - .bucket(blobStore.bucket()) - .prefix(keyPath) - .overrideConfiguration( - o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher) - ) - .build() - ) - ); - - Iterator listObjectsResponseIterator = listObjectsIterable.iterator(); - while (listObjectsResponseIterator.hasNext()) { - ListObjectsV2Response listObjectsResponse = SocketAccess.doPrivileged(listObjectsResponseIterator::next); - List blobsToDelete = listObjectsResponse.contents().stream().map(s3Object -> { - deletedBlobs.incrementAndGet(); - deletedBytes.addAndGet(s3Object.size()); - - return s3Object.key(); - }).collect(Collectors.toList()); - - if (!listObjectsResponseIterator.hasNext()) { - blobsToDelete.add(keyPath); - } - - doDeleteBlobs(blobsToDelete, false); - } - } catch (SdkException e) { - throw new IOException("Exception when deleting blob container [" + keyPath + "]", e); - } - - return new DeleteResult(deletedBlobs.get(), deletedBytes.get()); + public DeleteResult delete() { + PlainActionFuture future = new PlainActionFuture<>(); + deleteAsync(future); + return future.actionGet(); } @Override - public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { - doDeleteBlobs(blobNames, true); - } - - private void doDeleteBlobs(List blobNames, boolean relative) throws IOException { - if (blobNames.isEmpty()) { - return; - } - final Set outstanding; - if (relative) { - outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); - } else { - outstanding = new HashSet<>(blobNames); - } - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API allows 1k blobs per delete so we split up the given blobs into requests of bulk size deletes - final List deleteRequests = new ArrayList<>(); - final List partition = new ArrayList<>(); - for (String key : outstanding) { - partition.add(key); - if (partition.size() == blobStore.getBulkDeletesSize()) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - partition.clear(); - } - } - if (partition.isEmpty() == false) { - deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); - } - SocketAccess.doPrivilegedVoid(() -> { - SdkException aex = null; - for (DeleteObjectsRequest deleteRequest : deleteRequests) { - List keysInRequest = deleteRequest.delete() - .objects() - .stream() - .map(ObjectIdentifier::key) - .collect(Collectors.toList()); - try { - DeleteObjectsResponse deleteObjectsResponse = clientReference.get().deleteObjects(deleteRequest); - outstanding.removeAll(keysInRequest); - 
outstanding.addAll(deleteObjectsResponse.errors().stream().map(S3Error::key).collect(Collectors.toSet())); - if (!deleteObjectsResponse.errors().isEmpty()) { - logger.warn( - () -> new ParameterizedMessage( - "Failed to delete some blobs {}", - deleteObjectsResponse.errors() - .stream() - .map(s3Error -> "[" + s3Error.key() + "][" + s3Error.code() + "][" + s3Error.message() + "]") - .collect(Collectors.toList()) - ) - ); - } - } catch (SdkException e) { - // The AWS client threw any unexpected exception and did not execute the request at all so we do not - // remove any keys from the outstanding deletes set. - aex = ExceptionsHelper.useOrSuppress(aex, e); - } - } - if (aex != null) { - throw aex; - } - }); - } catch (Exception e) { - throw new IOException("Failed to delete blobs [" + outstanding + "]", e); - } - assert outstanding.isEmpty(); - } - - private DeleteObjectsRequest bulkDelete(String bucket, List blobs) { - return DeleteObjectsRequest.builder() - .bucket(bucket) - .delete( - Delete.builder() - .objects(blobs.stream().map(blob -> ObjectIdentifier.builder().key(blob).build()).collect(Collectors.toList())) - .quiet(true) - .build() - ) - .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().deleteObjectsMetricPublisher)) - .build(); + public void deleteBlobsIgnoringIfNotExists(List blobNames) { + PlainActionFuture future = new PlainActionFuture<>(); + deleteBlobsAsyncIgnoringIfNotExists(blobNames, future); + future.actionGet(); } @Override @@ -886,7 +770,11 @@ public void deleteAsync(ActionListener completionListener) { try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) { S3AsyncClient s3AsyncClient = asyncClientReference.get().client(); - ListObjectsV2Request listRequest = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath).build(); + ListObjectsV2Request listRequest = ListObjectsV2Request.builder() + .bucket(blobStore.bucket()) + .prefix(keyPath) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().listObjectsMetricPublisher)) + .build(); ListObjectsV2Publisher listPublisher = s3AsyncClient.listObjectsV2Paginator(listRequest); AtomicLong deletedBlobs = new AtomicLong(); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 1048ec784ec4e..72a812339e387 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -93,19 +93,19 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo private static final String NORMAL_TRANSFER_QUEUE_CONSUMER = "normal_transfer_queue_consumer"; protected final S3Service service; - private final S3AsyncService s3AsyncService; + protected final S3AsyncService s3AsyncService; private final Path configPath; - private AsyncExecutorContainer urgentExecutorBuilder; - private AsyncExecutorContainer priorityExecutorBuilder; - private AsyncExecutorContainer normalExecutorBuilder; + protected AsyncExecutorContainer urgentExecutorBuilder; + protected AsyncExecutorContainer priorityExecutorBuilder; + protected AsyncExecutorContainer normalExecutorBuilder; private ExecutorService lowTransferQConsumerService; private ExecutorService normalTransferQConsumerService; - private SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ; - 
private SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; - private TransferSemaphoresHolder transferSemaphoresHolder; - private GenericStatsMetricPublisher genericStatsMetricPublisher; + protected SizeBasedBlockingQ normalPrioritySizeBasedBlockingQ; + protected SizeBasedBlockingQ lowPrioritySizeBasedBlockingQ; + protected TransferSemaphoresHolder transferSemaphoresHolder; + protected GenericStatsMetricPublisher genericStatsMetricPublisher; public S3RepositoryPlugin(final Settings settings, final Path configPath) { this(settings, configPath, new S3Service(configPath), new S3AsyncService(configPath)); @@ -387,5 +387,8 @@ public void reload(Settings settings) { public void close() throws IOException { service.close(); s3AsyncService.close(); + urgentExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + priorityExecutorBuilder.getAsyncTransferEventLoopGroup().close(); + normalExecutorBuilder.getAsyncTransferEventLoopGroup().close(); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 2cb11541d924f..53371cd1529ce 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -48,6 +48,7 @@ import software.amazon.awssdk.services.s3.model.CreateMultipartUploadResponse; import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.DeletedObject; import software.amazon.awssdk.services.s3.model.GetObjectAttributesParts; import software.amazon.awssdk.services.s3.model.GetObjectAttributesRequest; import software.amazon.awssdk.services.s3.model.GetObjectAttributesResponse; @@ -92,7 +93,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -102,6 +102,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -286,9 +287,8 @@ public int numberOfPagesFetched() { } } - public void testDelete() throws IOException { + public void testDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); int bulkDeleteSize = 5; @@ -297,147 +297,314 @@ public void testDelete() throws IOException { when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + 
when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); final int totalPageCount = 3; final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); - final List keysDeleted = new ArrayList<>(); - AtomicInteger deleteCount = new AtomicInteger(); + List responses = new ArrayList<>(); + List allObjects = new ArrayList<>(); + long totalSize = 0; + + for (int i = 0; i < totalPageCount; i++) { + List pageObjects = new ArrayList<>(); + for (int j = 0; j < s3ObjectsPerPage; j++) { + pageObjects.add(S3Object.builder().key(randomAlphaOfLength(10)).size(s3ObjectSize).build()); + totalSize += s3ObjectSize; + } + allObjects.addAll(pageObjects); + responses.add(ListObjectsV2Response.builder().contents(pageObjects).build()); + } + + AtomicInteger counter = new AtomicInteger(); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - deleteCount.getAndIncrement(); - logger.info("Object sizes are{}", deleteObjectsRequest.delete().objects().size()); - keysDeleted.addAll(deleteObjectsRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList())); - return DeleteObjectsResponse.builder().build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + int currentCounter = counter.getAndIncrement(); + if (currentCounter < responses.size()) { + subscriber.onNext(responses.get(currentCounter)); + } + if (currentCounter == responses.size() - 1) { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - DeleteResult deleteResult = blobContainer.delete(); - assertEquals(s3ObjectSize * s3ObjectsPerPage * totalPageCount, deleteResult.bytesDeleted()); - assertEquals(s3ObjectsPerPage * totalPageCount, deleteResult.blobsDeleted()); - // keysDeleted will have blobPath also - assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); - assertTrue(keysDeleted.contains(blobPath.buildAsString())); - // keysDeleted will have blobPath also - assertEquals((int) Math.ceil(((double) keysDeleted.size() + 1) / bulkDeleteSize), deleteCount.get()); - keysDeleted.remove(blobPath.buildAsString()); - assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference resultRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void 
onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail("Unexpected failure: " + e.getMessage()); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + DeleteResult result = resultRef.get(); + + assertEquals(totalSize, result.bytesDeleted()); + assertEquals(allObjects.size(), result.blobsDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + int expectedDeleteCalls = (int) Math.ceil((double) allObjects.size() / bulkDeleteSize); + verify(s3AsyncClient, times(expectedDeleteCalls)).deleteObjects(any(DeleteObjectsRequest.class)); } - public void testDeleteItemLevelErrorsDuringDelete() { + public void testDeleteItemLevelErrorsDuringDelete() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + int bulkDeleteSize = 3; // Small size to force multiple delete requests + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + final int totalObjects = 10; + List s3Objects = new ArrayList<>(); + for (int i = 0; i < totalObjects; i++) { + s3Objects.add(S3Object.builder().key("key-" + i).size(100L).build()); + } - final List keysFailedDeletion = new ArrayList<>(); + AtomicBoolean onNext = new AtomicBoolean(false); doAnswer(invocation -> { - DeleteObjectsRequest deleteObjectsRequest = invocation.getArgument(0); - int i = 0; - for (ObjectIdentifier objectIdentifier : deleteObjectsRequest.delete().objects()) { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + if (onNext.compareAndSet(false, true)) { + subscriber.onNext(ListObjectsV2Response.builder().contents(s3Objects).build()); + } else { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + // Simulate item-level errors during delete + AtomicInteger deleteCallCount = new AtomicInteger(0); + 
when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenAnswer(invocation -> { + DeleteObjectsRequest request = invocation.getArgument(0); + List errors = new ArrayList<>(); + List deletedObjects = new ArrayList<>(); + + for (int i = 0; i < request.delete().objects().size(); i++) { if (i % 2 == 0) { - keysFailedDeletion.add(objectIdentifier.key()); + errors.add( + S3Error.builder() + .key(request.delete().objects().get(i).key()) + .code("InternalError") + .message("Simulated error") + .build() + ); + } else { + deletedObjects.add(DeletedObject.builder().key(request.delete().objects().get(i).key()).build()); } - i++; } - return DeleteObjectsResponse.builder() - .errors(keysFailedDeletion.stream().map(key -> S3Error.builder().key(key).build()).collect(Collectors.toList())) - .build(); - }).when(client).deleteObjects(any(DeleteObjectsRequest.class)); + + deleteCallCount.incrementAndGet(); + return CompletableFuture.completedFuture(DeleteObjectsResponse.builder().errors(errors).deleted(deletedObjects).build()); + }); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(AssertionError.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference resultRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + resultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + + assertNull("Unexpected exception: " + exceptionRef.get(), exceptionRef.get()); + DeleteResult result = resultRef.get(); + assertNotNull("Expected DeleteResult but got null", result); + + // We expect half of the objects to be deleted successfully + // But as of today, the blob delete count and bytes is updated a bit earlier. 
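To make the expectation above concrete, here is an editorial sketch (not part of the patch) of the partial-failure shape the mocked deleteObjects call returns: every requested key lands either in deleted() or in errors(), and, per the comment above, the container updates its counters before inspecting errors(). It assumes the AWS SDK v2 model classes already imported by this test.

    // Editorial sketch, not patch content: one key succeeds, one is rejected,
    // yet both were part of the issued bulk delete request.
    DeleteObjectsResponse partialFailure = DeleteObjectsResponse.builder()
        .deleted(List.of(DeletedObject.builder().key("key-1").build()))
        .errors(List.of(S3Error.builder().key("key-0").code("InternalError").message("Simulated error").build()))
        .build();
    assert partialFailure.deleted().size() == 1 && partialFailure.errors().size() == 1;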
+ assertEquals(totalObjects, result.blobsDeleted()); + assertEquals(totalObjects * 100L, result.bytesDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + + // Calculate expected number of deleteObjects calls + int expectedDeleteCalls = (int) Math.ceil((double) totalObjects / bulkDeleteSize); + assertEquals(expectedDeleteCalls, deleteCallCount.get()); } - public void testDeleteSdkExceptionDuringListOperation() { + public void testDeleteSdkExceptionDuringListOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); final S3BlobStore blobStore = mock(S3BlobStore.class); when(blobStore.bucket()).thenReturn(bucketName); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onError(new RuntimeException("Simulated listing error")); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); + + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to list objects for deletion", exceptionRef.get().getMessage()); } - public void testDeleteSdkExceptionDuringDeleteOperation() { + public void testDeleteSdkExceptionDuringDeleteOperation() throws Exception { final String bucketName = randomAlphaOfLengthBetween(1, 10); - final BlobPath blobPath = new BlobPath(); + int bulkDeleteSize = 5; final S3BlobStore blobStore = mock(S3BlobStore.class); 
when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); - final S3Client client = mock(S3Client.class); - doAnswer(invocation -> new AmazonS3Reference(client)).when(blobStore).clientReference(); + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + when(asyncClientReference.get()).thenReturn(AmazonAsyncS3WithCredentials.create(s3AsyncClient, s3AsyncClient, s3AsyncClient, null)); - ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); - final int totalPageCount = 3; - final long s3ObjectSize = ByteSizeUnit.MB.toBytes(5); - final int s3ObjectsPerPage = 5; - MockListObjectsV2ResponseIterator listObjectsV2ResponseIterator = new MockListObjectsV2ResponseIterator( - totalPageCount, - s3ObjectsPerPage, - s3ObjectSize - ); - when(listObjectsV2Iterable.iterator()).thenReturn(listObjectsV2ResponseIterator); - when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onNext( + ListObjectsV2Response.builder().contents(S3Object.builder().key("test-key").size(100L).build()).build() + ); + subscriber.onComplete(); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.>any()); - when(client.deleteObjects(any(DeleteObjectsRequest.class))).thenThrow(SdkException.builder().build()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + CompletableFuture failedFuture = new CompletableFuture<>(); + failedFuture.completeExceptionally(new RuntimeException("Simulated delete error")); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture); final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); - assertThrows(IOException.class, blobContainer::delete); + CountDownLatch latch = new CountDownLatch(1); + AtomicReference exceptionRef = new AtomicReference<>(); + + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected failure but got success"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + assertNotNull(exceptionRef.get()); + logger.error("", exceptionRef.get()); + assertTrue(exceptionRef.get() instanceof CompletionException); + assertEquals("java.lang.RuntimeException: Simulated delete error", exceptionRef.get().getMessage()); } public void testExecuteSingleUpload() throws IOException { diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java index 9ac1564c807c3..c0ee9cb6d980f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java +++ 
b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3RepositoryPluginTests.java @@ -8,6 +8,7 @@ package org.opensearch.repositories.s3; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.SizeUnit; import org.opensearch.common.unit.SizeValue; @@ -25,6 +26,8 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class S3RepositoryPluginTests extends OpenSearchTestCase { @@ -37,8 +40,6 @@ public void testGetExecutorBuilders() throws IOException { ThreadPool threadPool = null; try (S3RepositoryPlugin plugin = new S3RepositoryPlugin(settings, configPath)) { List> executorBuilders = plugin.getExecutorBuilders(settings); - assertNotNull(executorBuilders); - assertFalse(executorBuilders.isEmpty()); threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); final Executor executor = threadPool.executor(URGENT_FUTURE_COMPLETION); assertNotNull(executor); @@ -57,6 +58,12 @@ public void testGetExecutorBuilders() throws IOException { assertThat(info.getMax(), equalTo(size)); assertThat(openSearchThreadPoolExecutor.getMaximumPoolSize(), equalTo(size)); + ClusterService clusterService = mock(ClusterService.class); + when(clusterService.getSettings()).thenReturn(Settings.EMPTY); + plugin.createComponents(null, clusterService, threadPool, null, null, null, null, null, null, null, null); + assertNotNull(executorBuilders); + assertFalse(executorBuilders.isEmpty()); + final int availableProcessors = Runtime.getRuntime().availableProcessors(); if (processors > availableProcessors) { assertWarnings( diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index c27efa080ac4e..f554e6d1dc591 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -789,7 +789,6 @@ public void apply(Settings value, Settings current, Settings previous) { // Snapshot related Settings BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING, - BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, BlobStoreRepository.SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 998ae5e4791b7..9146cb3c4091c 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -50,7 +50,6 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.RepositoryCleanupInProgress; @@ -70,7 +69,6 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import 
org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -428,16 +426,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo Setting.Property.Final ); - /** - * Controls the fixed prefix for the snapshot shard blob path. cluster.snapshot.async-deletion.enable - */ - public static final Setting SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING = Setting.boolSetting( - "cluster.snapshot.async-deletion.enable", - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic - ); - protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -531,8 +519,6 @@ protected static long calculateMaxWithinIntLimit(long defaultThresholdOfHeap, lo private final String snapshotShardPathPrefix; - private volatile boolean enableAsyncDeletion; - protected final long repositoryDataCacheThreshold; /** @@ -587,8 +573,6 @@ protected BlobStoreRepository( this.recoverySettings = recoverySettings; this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings()); this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); - this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings()); - clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion); this.repositoryDataCacheThreshold = SNAPSHOT_REPOSITORY_DATA_CACHE_THRESHOLD.get(clusterService.getSettings()).getBytes(); } @@ -2219,15 +2203,7 @@ private void executeOneStaleIndexDelete( private DeleteResult deleteContainer(BlobContainer container) throws IOException { long startTime = System.nanoTime(); - DeleteResult deleteResult; - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - // Use deleteAsync and wait for the result - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteAsync(future); - deleteResult = future.actionGet(); - } else { - deleteResult = container.delete(); - } + DeleteResult deleteResult = container.delete(); logger.debug(new ParameterizedMessage("[{}] Deleted {} in {}ns", metadata.name(), container.path(), startTime - System.nanoTime())); return deleteResult; } @@ -2862,13 +2838,7 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna private void deleteFromContainer(BlobContainer container, List blobs) throws IOException { logger.trace(() -> new ParameterizedMessage("[{}] Deleting {} from [{}]", metadata.name(), blobs, container.path())); long startTime = System.nanoTime(); - if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { - PlainActionFuture future = new PlainActionFuture<>(); - ((AsyncMultiStreamBlobContainer) container).deleteBlobsAsyncIgnoringIfNotExists(blobs, future); - future.actionGet(); - } else { - container.deleteBlobsIgnoringIfNotExists(blobs); - } + container.deleteBlobsIgnoringIfNotExists(blobs); logger.debug( () -> new ParameterizedMessage( "[{}] Deletion {} from [{}] took {}ns", @@ -4742,8 +4712,4 @@ public String toString() { return name; } } - - public void setEnableAsyncDeletion(boolean enableAsyncDeletion) { - this.enableAsyncDeletion = enableAsyncDeletion; - } } From 2eadf12c0dff4da120ecae085ccf3324b32c215c Mon Sep 17 00:00:00 2001 From: Shubh Sahu Date: Thu, 9 Jan 2025 11:14:21 +0530 Subject: [PATCH 13/37] Fix Shallow copy snapshot 
failures on closed index (#16868) * Fix shallow v1 snapshot failures on closed index Signed-off-by: Shubh Sahu * UT fix Signed-off-by: Shubh Sahu * Adding UT Signed-off-by: Shubh Sahu * small fix Signed-off-by: Shubh Sahu * Addressing comments Signed-off-by: Shubh Sahu * Addressing comments Signed-off-by: Shubh Sahu * Modifying IT to restore snapshot Signed-off-by: Shubh Sahu --------- Signed-off-by: Shubh Sahu Co-authored-by: Shubh Sahu --- CHANGELOG.md | 1 + .../opensearch/remotestore/RemoteStoreIT.java | 78 +++++++++++++++++ .../opensearch/index/shard/IndexShard.java | 16 ++++ .../opensearch/repositories/Repository.java | 39 +++++++++ .../blobstore/BlobStoreRepository.java | 54 ++++++++++-- .../snapshots/SnapshotShardsService.java | 63 +++++++++----- .../index/shard/RemoteIndexShardTests.java | 87 +++++++++++++++++++ .../SegmentReplicationIndexShardTests.java | 39 +++++++-- .../RepositoriesServiceTests.java | 2 + ...enSearchIndexLevelReplicationTestCase.java | 4 + 10 files changed, 346 insertions(+), 37 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b49368a20fa8..dbbfd96eefca2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) +- Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) ### Security diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index ebb911c739eb3..1c4585e38ee90 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -39,6 +39,9 @@ import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotInfo; +import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; @@ -1078,4 +1081,79 @@ public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws Interrup Thread.sleep(10000); ensureGreen(INDEX_NAME); } + + public void testSuccessfulShallowV1SnapshotPostIndexClose() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + String dataNode = internalCluster().startDataOnlyNodes(1).get(0); + createIndex(INDEX_NAME, remoteStoreIndexSettings(0, 10000L, -1)); + ensureGreen(INDEX_NAME); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "0ms")); + + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + logger.info("Create shallow snapshot setting enabled repo"); + String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + 
Path shallowSnapshotRepoPath = randomRepoPath(); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + createRepository(shallowSnapshotRepoName, "fs", settings); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + flushAndRefresh(INDEX_NAME); + + logger.info("Verify shallow snapshot created before close"); + final String snapshot1 = "snapshot1"; + SnapshotInfo snapshotInfo1 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot1) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo1.state()); + assertTrue(snapshotInfo1.successfulShards() > 0); + assertEquals(0, snapshotInfo1.failedShards()); + + for (int i = 0; i < 10; i++) { + indexBulk(INDEX_NAME, 1); + } + + // close index + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(1000); + logger.info("Verify shallow snapshot created after close"); + final String snapshot2 = "snapshot2"; + + SnapshotInfo snapshotInfo2 = internalCluster().client() + .admin() + .cluster() + .prepareCreateSnapshot(shallowSnapshotRepoName, snapshot2) + .setIndices(INDEX_NAME) + .setWaitForCompletion(true) + .get() + .getSnapshotInfo(); + + assertEquals(SnapshotState.SUCCESS, snapshotInfo2.state()); + assertTrue(snapshotInfo2.successfulShards() > 0); + assertEquals(0, snapshotInfo2.failedShards()); + + // delete the index + cluster().wipeIndices(INDEX_NAME); + // try restoring the snapshot + RestoreSnapshotResponse restoreSnapshotResponse = clusterAdmin().prepareRestoreSnapshot(shallowSnapshotRepoName, snapshot2) + .setWaitForCompletion(true) + .execute() + .actionGet(); + assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); + ensureGreen(INDEX_NAME); + flushAndRefresh(INDEX_NAME); + assertBusy(() -> { assertHitCount(client(dataNode).prepareSearch(INDEX_NAME).setSize(0).get(), 20); }); + } } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index eb3999718ca5b..f5de4dfb5a933 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -1624,6 +1624,22 @@ public org.apache.lucene.util.Version minimumCompatibleVersion() { return luceneVersion == null ? indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion; } + /** + * Fetches the last remote uploaded segment metadata file + * @return {@link RemoteSegmentMetadata} + * @throws IOException + */ + public RemoteSegmentMetadata fetchLastRemoteUploadedSegmentMetadata() throws IOException { + if (!indexSettings.isAssignedOnRemoteNode()) { + throw new IllegalStateException("Index is not assigned on Remote Node"); + } + RemoteSegmentMetadata lastUploadedMetadata = getRemoteDirectory().readLatestMetadataFile(); + if (lastUploadedMetadata == null) { + throw new FileNotFoundException("No metadata file found in remote store"); + } + return lastUploadedMetadata; + } + /** * Creates a new {@link IndexCommit} snapshot from the currently running engine. All resources referenced by this * commit won't be freed until the commit / snapshot is closed. 
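A hedged usage sketch (editorial, not part of the patch) of how the closed-index snapshot path can consume the new accessor; it is a fragment that assumes an indexShard variable in scope plus java.util.Map, java.util.stream.Collectors and the RemoteSegmentMetadata import, and it mirrors the accessors used later in SnapshotShardsService.

    // Editorial sketch: snapshot inputs for a closed remote-store index, derived
    // from the last uploaded segment metadata instead of an acquired IndexCommit.
    RemoteSegmentMetadata metadata = indexShard.fetchLastRemoteUploadedSegmentMetadata();
    long primaryTerm = metadata.getPrimaryTerm();        // replaces the commit's primary term
    long commitGeneration = metadata.getGeneration();    // replaces IndexCommit#getGeneration
    Map<String, Long> indexFilesToFileLengthMap = metadata.getMetadata()
        .entrySet()
        .stream()
        .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength()));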
diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java
index 138bc13140aea..259c4a6e09ce7 100644
--- a/server/src/main/java/org/opensearch/repositories/Repository.java
+++ b/server/src/main/java/org/opensearch/repositories/Repository.java
@@ -416,6 +416,45 @@ default void snapshotRemoteStoreIndexShard(
         throw new UnsupportedOperationException();
     }
 
+    /**
+     * Adds a reference of remote store data for an index commit point.
+     * <p>
+     * The index commit point can be obtained by using the {@link org.opensearch.index.engine.Engine#acquireLastIndexCommit} method.
+     * For a closed index, it can be obtained by reading the last remote uploaded metadata via {@link org.opensearch.index.shard.IndexShard#fetchLastRemoteUploadedSegmentMetadata()}.
+     * Repository implementations shouldn't release the snapshot index commit point. It is done by the method caller.
+     * <p>
+     * As the snapshot process progresses, the implementation of this method should update the {@link IndexShardSnapshotStatus} object and check
+     * {@link IndexShardSnapshotStatus#isAborted()} to see if the snapshot process should be aborted.
+     * @param store store to be snapshotted
+     * @param snapshotId snapshot id
+     * @param indexId id for the index being snapshotted
+     * @param snapshotIndexCommit commit point
+     * @param shardStateIdentifier a unique identifier of the state of the shard that is stored with the shard's snapshot and used
+     *                             to detect if the shard has changed between snapshots. If {@code null} is passed as the identifier
+     *                             snapshotting will be done by inspecting the physical files referenced by {@code snapshotIndexCommit}
+     * @param snapshotStatus snapshot status
+     * @param primaryTerm current primary term
+     * @param commitGeneration current commit generation
+     * @param startTime start time of the snapshot commit; this will be used as the start time for the snapshot
+     * @param indexFilesToFileLengthMap map of index files to file length
+     * @param listener listener invoked on completion
+     */
+    default void snapshotRemoteStoreIndexShard(
+        Store store,
+        SnapshotId snapshotId,
+        IndexId indexId,
+        @Nullable IndexCommit snapshotIndexCommit,
+        @Nullable String shardStateIdentifier,
+        IndexShardSnapshotStatus snapshotStatus,
+        long primaryTerm,
+        long commitGeneration,
+        long startTime,
+        @Nullable Map<String, Long> indexFilesToFileLengthMap,
+        ActionListener<String> listener
+    ) {
+        throw new UnsupportedOperationException();
+    }
+
     /**
      * Restores snapshot of the shard.
      * <p>
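An editorial sketch (not part of the patch) of the closed-index call shape for this new overload, where no IndexCommit is available and the file lengths come from remote segment metadata; the argument names mirror the javadoc above and the later SnapshotShardsService change.

    // Editorial sketch: invoking the overload for a closed remote-store index.
    repository.snapshotRemoteStoreIndexShard(
        store,
        snapshotId,
        indexId,
        null,                        // snapshotIndexCommit: none for a closed index
        null,                        // shardStateIdentifier
        snapshotStatus,
        primaryTerm,                 // read from RemoteSegmentMetadata
        commitGeneration,            // read from RemoteSegmentMetadata
        startTime,
        indexFilesToFileLengthMap,   // file name -> length, read from remote metadata
        listener
    );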
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 9146cb3c4091c..93a7dc0cb06af 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -3714,6 +3714,33 @@ private void writeAtomic(BlobContainer container, final String blobName, final B } } + @Override + public void snapshotRemoteStoreIndexShard( + Store store, + SnapshotId snapshotId, + IndexId indexId, + IndexCommit snapshotIndexCommit, + @Nullable String shardStateIdentifier, + IndexShardSnapshotStatus snapshotStatus, + long primaryTerm, + long startTime, + ActionListener listener + ) { + snapshotRemoteStoreIndexShard( + store, + snapshotId, + indexId, + snapshotIndexCommit, + shardStateIdentifier, + snapshotStatus, + primaryTerm, + snapshotIndexCommit.getGeneration(), + startTime, + null, + listener + ); + } + @Override public void snapshotRemoteStoreIndexShard( Store store, @@ -3723,13 +3750,16 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map indexFilesToFileLengthMap, ActionListener listener ) { if (isReadOnly()) { listener.onFailure(new RepositoryException(metadata.name(), "cannot snapshot shard on a readonly repository")); return; } + final ShardId shardId = store.shardId(); try { final String generation = snapshotStatus.generation(); @@ -3737,13 +3767,21 @@ public void snapshotRemoteStoreIndexShard( final BlobContainer shardContainer = shardContainer(indexId, shardId); long indexTotalFileSize = 0; - // local store is being used here to fetch the files metadata instead of remote store as currently - // remote store is mirroring the local store. - List fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); - Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); - for (String fileName : fileNames) { - indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + List fileNames; + + if (snapshotIndexCommit != null) { + // local store is being used here to fetch the files metadata instead of remote store as currently + // remote store is mirroring the local store. 
+ fileNames = new ArrayList<>(snapshotIndexCommit.getFileNames()); + Store.MetadataSnapshot commitSnapshotMetadata = store.getMetadata(snapshotIndexCommit); + for (String fileName : fileNames) { + indexTotalFileSize += commitSnapshotMetadata.get(fileName).length(); + } + } else { + fileNames = new ArrayList<>(indexFilesToFileLengthMap.keySet()); + indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); } + int indexTotalNumberOfFiles = fileNames.size(); snapshotStatus.moveToStarted( @@ -3754,7 +3792,7 @@ public void snapshotRemoteStoreIndexShard( indexTotalFileSize ); - final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(snapshotIndexCommit.getGeneration()); + final IndexShardSnapshotStatus.Copy lastSnapshotStatus = snapshotStatus.moveToFinalize(commitGeneration); // now create and write the commit point logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId); @@ -3765,7 +3803,7 @@ public void snapshotRemoteStoreIndexShard( snapshotId.getName(), lastSnapshotStatus.getIndexVersion(), primaryTerm, - snapshotIndexCommit.getGeneration(), + commitGeneration, lastSnapshotStatus.getStartTime(), threadPool.absoluteTimeInMillis() - lastSnapshotStatus.getStartTime(), indexTotalNumberOfFiles, diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java index 8da36bbb8d4bd..1e2264593310d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardsService.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; import org.opensearch.cluster.SnapshotsInProgress.ShardState; import org.opensearch.cluster.SnapshotsInProgress.State; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; @@ -63,6 +64,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.IndexShardSnapshotStatus.Stage; +import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; @@ -74,7 +76,6 @@ import org.opensearch.transport.TransportService; import java.io.IOException; -import java.nio.file.NoSuchFileException; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -371,7 +372,9 @@ private void snapshot( ActionListener listener ) { try { - final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()); + final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + final IndexShard indexShard = indexService.getShardOrNull(shardId.id()); + final boolean closedIndex = indexService.getMetadata().getState() == IndexMetadata.State.CLOSE; if (indexShard.routingEntry().primary() == false) { throw new IndexShardSnapshotFailedException(shardId, "snapshot should be performed only on primary"); } @@ -398,24 +401,42 @@ private void snapshot( if (remoteStoreIndexShallowCopy && indexShard.indexSettings().isRemoteStoreEnabled()) { long startTime = threadPool.relativeTimeInMillis(); long primaryTerm = indexShard.getOperationPrimaryTerm(); - 
// we flush first to make sure we get the latest writes snapshotted - wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); - IndexCommit snapshotIndexCommit = wrappedSnapshot.get(); - long commitGeneration = snapshotIndexCommit.getGeneration(); + long commitGeneration = 0L; + Map indexFilesToFileLengthMap = null; + IndexCommit snapshotIndexCommit = null; + try { + if (closedIndex) { + RemoteSegmentMetadata lastRemoteUploadedIndexCommit = indexShard.fetchLastRemoteUploadedSegmentMetadata(); + indexFilesToFileLengthMap = lastRemoteUploadedIndexCommit.getMetadata() + .entrySet() + .stream() + .collect(Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getLength())); + primaryTerm = lastRemoteUploadedIndexCommit.getPrimaryTerm(); + commitGeneration = lastRemoteUploadedIndexCommit.getGeneration(); + } else { + wrappedSnapshot = indexShard.acquireLastIndexCommitAndRefresh(true); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + } indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); - } catch (NoSuchFileException e) { - wrappedSnapshot.close(); - logger.warn( - "Exception while acquiring lock on primaryTerm = {} and generation = {}", - primaryTerm, - commitGeneration - ); - indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); - wrappedSnapshot = indexShard.acquireLastIndexCommit(false); - snapshotIndexCommit = wrappedSnapshot.get(); - commitGeneration = snapshotIndexCommit.getGeneration(); - indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } catch (IOException e) { + if (closedIndex) { + logger.warn("Exception while reading latest metadata file from remote store"); + listener.onFailure(e); + } else { + wrappedSnapshot.close(); + logger.warn( + "Exception while acquiring lock on primaryTerm = {} and generation = {}", + primaryTerm, + commitGeneration + ); + indexShard.flush(new FlushRequest(shardId.getIndexName()).force(true)); + wrappedSnapshot = indexShard.acquireLastIndexCommit(false); + snapshotIndexCommit = wrappedSnapshot.get(); + commitGeneration = snapshotIndexCommit.getGeneration(); + indexShard.acquireLockOnCommitData(snapshot.getSnapshotId().getUUID(), primaryTerm, commitGeneration); + } } try { repository.snapshotRemoteStoreIndexShard( @@ -423,11 +444,13 @@ private void snapshot( snapshot.getSnapshotId(), indexId, snapshotIndexCommit, - getShardStateId(indexShard, snapshotIndexCommit), + null, snapshotStatus, primaryTerm, + commitGeneration, startTime, - ActionListener.runBefore(listener, wrappedSnapshot::close) + indexFilesToFileLengthMap, + closedIndex ? 
listener : ActionListener.runBefore(listener, wrappedSnapshot::close) ); } catch (IndexShardSnapshotFailedException e) { logger.error( diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index 57a561bc8f2a3..4d85a3c491af8 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -12,6 +12,9 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.util.Version; import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterChangedEvent; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.settings.Settings; @@ -20,6 +23,7 @@ import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -32,6 +36,11 @@ import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.Snapshot; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.test.CorruptionUtils; import org.opensearch.test.junit.annotations.TestLogging; import org.hamcrest.MatcherAssert; @@ -41,6 +50,7 @@ import java.nio.channels.FileChannel; import java.nio.file.Path; import java.nio.file.StandardOpenOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -55,6 +65,8 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -541,6 +553,81 @@ public void onReplicationFailure( } } + public void testShallowCopySnapshotForClosedIndexSuccessful() throws Exception { + try (ReplicationGroup shards = createGroup(0, settings)) { + final IndexShard primaryShard = shards.getPrimary(); + shards.startAll(); + shards.indexDocs(10); + shards.refresh("test"); + shards.flush(); + shards.assertAllEqual(10); + + RepositoriesService repositoriesService = createRepositoriesService(); + BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository("random"); + + doAnswer(invocation -> { + IndexShardSnapshotStatus snapshotStatus = invocation.getArgument(5); + long commitGeneration = invocation.getArgument(7); + long startTime = invocation.getArgument(8); + final Map indexFilesToFileLengthMap = invocation.getArgument(9); + ActionListener listener = invocation.getArgument(10); + if (indexFilesToFileLengthMap != null) { + List fileNames = new 
ArrayList<>(indexFilesToFileLengthMap.keySet()); + long indexTotalFileSize = indexFilesToFileLengthMap.values().stream().mapToLong(Long::longValue).sum(); + int indexTotalNumberOfFiles = fileNames.size(); + snapshotStatus.moveToStarted(startTime, 0, indexTotalNumberOfFiles, 0, indexTotalFileSize); + // Not performing actual snapshot, just modifying the state + snapshotStatus.moveToFinalize(commitGeneration); + snapshotStatus.moveToDone(System.currentTimeMillis(), snapshotStatus.generation()); + listener.onResponse(snapshotStatus.generation()); + return null; + } + listener.onResponse(snapshotStatus.generation()); + return null; + }).when(repository) + .snapshotRemoteStoreIndexShard(any(), any(), any(), any(), any(), any(), anyLong(), anyLong(), anyLong(), any(), any()); + + final SnapshotShardsService shardsService = getSnapshotShardsService( + primaryShard, + shards.getIndexMetadata(), + true, + repositoriesService + ); + final Snapshot snapshot1 = new Snapshot( + randomAlphaOfLength(10), + new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5)) + ); + + // Initialize the shallow copy snapshot + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot1, + primaryShard, + SnapshotsInProgress.State.INIT, + true + ); + shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); + + // start the snapshot + shardsService.clusterChanged( + new ClusterChangedEvent( + "test", + addSnapshotIndex(clusterService.state(), snapshot1, primaryShard, SnapshotsInProgress.State.STARTED, true), + initState + ) + ); + + // Check the snapshot got completed successfully + assertBusy(() -> { + final IndexShardSnapshotStatus.Copy copy = shardsService.currentSnapshotShards(snapshot1) + .get(primaryShard.shardId) + .asCopy(); + final IndexShardSnapshotStatus.Stage stage = copy.getStage(); + assertEquals(IndexShardSnapshotStatus.Stage.DONE, stage); + }); + } + } + private RemoteStoreReplicationSource getRemoteStoreReplicationSource(IndexShard shard, Runnable postGetFilesRunnable) { return new RemoteStoreReplicationSource(shard) { @Override diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 2311fc582616f..f4f94baabd7b0 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -68,6 +68,7 @@ import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfoTests; @@ -892,10 +893,21 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { replicateSegments(primaryShard, shards.getReplicas()); shards.assertAllEqual(10); - final SnapshotShardsService shardsService = getSnapshotShardsService(replicaShard); + final SnapshotShardsService shardsService = getSnapshotShardsService( + replicaShard, + shards.getIndexMetadata(), + false, + createRepositoriesService() + ); final Snapshot snapshot = new Snapshot(randomAlphaOfLength(10), new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5))); - final ClusterState initState = addSnapshotIndex(clusterService.state(), 
snapshot, replicaShard, SnapshotsInProgress.State.INIT); + final ClusterState initState = addSnapshotIndex( + clusterService.state(), + snapshot, + replicaShard, + SnapshotsInProgress.State.INIT, + false + ); shardsService.clusterChanged(new ClusterChangedEvent("test", initState, clusterService.state())); CountDownLatch latch = new CountDownLatch(1); @@ -907,7 +919,7 @@ public void testSnapshotWhileFailoverIncomplete() throws Exception { shardsService.clusterChanged( new ClusterChangedEvent( "test", - addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED), + addSnapshotIndex(clusterService.state(), snapshot, replicaShard, SnapshotsInProgress.State.STARTED, false), initState ) ); @@ -956,21 +968,30 @@ public void testComputeReplicationCheckpointNullInfosReturnsEmptyCheckpoint() th } } - private SnapshotShardsService getSnapshotShardsService(IndexShard replicaShard) { + protected SnapshotShardsService getSnapshotShardsService( + IndexShard indexShard, + IndexMetadata indexMetadata, + boolean closedIdx, + RepositoriesService repositoriesService + ) { final TransportService transportService = mock(TransportService.class); when(transportService.getThreadPool()).thenReturn(threadPool); final IndicesService indicesService = mock(IndicesService.class); final IndexService indexService = mock(IndexService.class); when(indicesService.indexServiceSafe(any())).thenReturn(indexService); - when(indexService.getShardOrNull(anyInt())).thenReturn(replicaShard); - return new SnapshotShardsService(settings, clusterService, createRepositoriesService(), transportService, indicesService); + when(indexService.getShardOrNull(anyInt())).thenReturn(indexShard); + when(indexService.getMetadata()).thenReturn( + new IndexMetadata.Builder(indexMetadata).state(closedIdx ? 
IndexMetadata.State.CLOSE : IndexMetadata.State.OPEN).build() + ); + return new SnapshotShardsService(settings, clusterService, repositoriesService, transportService, indicesService); } - private ClusterState addSnapshotIndex( + protected ClusterState addSnapshotIndex( ClusterState state, Snapshot snapshot, IndexShard shard, - SnapshotsInProgress.State snapshotState + SnapshotsInProgress.State snapshotState, + boolean shallowCopySnapshot ) { final Map shardsBuilder = new HashMap<>(); ShardRouting shardRouting = shard.shardRouting; @@ -991,7 +1012,7 @@ private ClusterState addSnapshotIndex( null, SnapshotInfoTests.randomUserMetadata(), VersionUtils.randomVersion(random()), - false + shallowCopySnapshot ); return ClusterState.builder(state) .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(Collections.singletonList(entry))) diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 4cd822c7d583b..1ec6d320762f2 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -774,7 +774,9 @@ public void snapshotRemoteStoreIndexShard( String shardStateIdentifier, IndexShardSnapshotStatus snapshotStatus, long primaryTerm, + long commitGeneration, long startTime, + Map indexFilesToFileLengthMap, ActionListener listener ) { diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index a5dc13c334513..062ebd2051f6e 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -289,6 +289,10 @@ protected EngineConfigFactory getEngineConfigFactory(IndexSettings indexSettings return new EngineConfigFactory(indexSettings); } + public IndexMetadata getIndexMetadata() { + return indexMetadata; + } + public int indexDocs(final int numOfDoc) throws Exception { for (int doc = 0; doc < numOfDoc; doc++) { final IndexRequest indexRequest = new IndexRequest(index.getName()).id(Integer.toString(docId.incrementAndGet())) From 2c7d7749871e5f21b224660877e5a2f1c6838b86 Mon Sep 17 00:00:00 2001 From: Rishikesh <62345295+Rishikesh1159@users.noreply.github.com> Date: Thu, 9 Jan 2025 10:35:16 -0800 Subject: [PATCH 14/37] Add Response Status Number in http trace logs. 
(#16978) Signed-off-by: Rishikesh1159 --- server/src/main/java/org/opensearch/http/HttpTracer.java | 3 ++- .../org/opensearch/http/AbstractHttpServerTransportTests.java | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/http/HttpTracer.java b/server/src/main/java/org/opensearch/http/HttpTracer.java index de1da4a20e294..e31cca21f6a54 100644 --- a/server/src/main/java/org/opensearch/http/HttpTracer.java +++ b/server/src/main/java/org/opensearch/http/HttpTracer.java @@ -116,10 +116,11 @@ void traceResponse( ) { logger.trace( new ParameterizedMessage( - "[{}][{}][{}][{}][{}] sent response to [{}] success [{}]", + "[{}][{}][{}][{}][{}][{}] sent response to [{}] success [{}]", requestId, opaqueHeader, restResponse.status(), + restResponse.status().getStatus(), restResponse.contentType(), contentLength, httpChannel, diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index a4295289c3109..cd6beffa6e195 100644 --- a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -285,6 +285,8 @@ public HttpStats stats() { + opaqueId + "\\]\\[" + (badRequest ? "BAD_REQUEST" : "OK") + + "\\]\\[" + + (badRequest ? "400" : "200") + "\\]\\[null\\]\\[0\\] sent response to \\[.*" ) ); From cc990c024fe5305f40daa6a1991cd3d9fa21467c Mon Sep 17 00:00:00 2001 From: kkewwei Date: Fri, 10 Jan 2025 07:43:29 +0800 Subject: [PATCH 15/37] support termQueryCaseInsensitive/termQuery can search from doc_value in flat_object/keyword field (#16974) Signed-off-by: kkewwei Signed-off-by: kkewwei --- CHANGELOG.md | 1 + .../92_flat_object_support_doc_values.yml | 50 +++- .../test/search/340_doc_values_field.yml | 35 ++- .../index/mapper/FlatObjectFieldMapper.java | 19 +- .../index/mapper/KeywordFieldMapper.java | 41 +++ .../mapper/FlatObjectFieldTypeTests.java | 283 ++++++++++++++++-- .../index/mapper/KeywordFieldTypeTests.java | 49 ++- 7 files changed, 436 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dbbfd96eefca2..0cb11d1c45d38 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Introduce framework for auxiliary transports and an experimental gRPC transport plugin ([#16534](https://github.com/opensearch-project/OpenSearch/pull/16534)) - Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/)) - Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/)) +- Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/)) ### Dependencies - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml index c840276ee1157..266b41c6b5a77 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml +++ 
b/rest-api-spec/src/main/resources/rest-api-spec/test/index/92_flat_object_support_doc_values.yml @@ -45,6 +45,8 @@ setup: {"order":"order7","issue":{"labels":{"number":7,"name":"abc7","status":1}}} {"index":{"_index":"flat_object_doc_values_test","_id":"8"}} {"order":"order8","issue":{"labels":{"number":8,"name":"abc8","status":1}}} + {"index":{"_index":"flat_object_doc_values_test","_id":"9"}} + {"order":"order9","issue":{"labels":{"number":9,"name":"abC8","status":1}}} --- # Delete Index when connection is teardown @@ -68,7 +70,53 @@ teardown: } } - - length: { hits.hits: 9 } + - length: { hits.hits: 10 } + + # Case Insensitive Term Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels.name: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } + + # Case Insensitive Term Query with no path. + - do: + search: + body: { + _source: true, + query: { + bool: { + must: [ + { + term: { + issue.labels: { + value: "abc8", + case_insensitive: "true" + } + } + } + ] + } + } + } + + - length: { hits.hits: 2 } # Term Query with exact dot path. - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml index 647aaf2c9088b..53ed730925595 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/340_doc_values_field.yml @@ -1121,8 +1121,8 @@ "search on fields with only doc_values enabled": - skip: features: [ "headers" ] - version: " - 2.18.99" - reason: "searching with only doc_values was finally added in 2.19.0" + version: " - 2.99.99" + reason: "searching with only doc_values was finally added in 3.0.0" - do: indices.create: index: test-doc-values @@ -1193,6 +1193,37 @@ - '{ "some_keyword": "400", "byte": 121, "double": 101.0, "float": "801.0", "half_float": "401.0", "integer": 1291, "long": 13457, "short": 151, "unsigned_long": 10223372036854775801, "ip_field": "192.168.0.2", "boolean": true, "date_nanos": "2020-10-29T12:12:12.123456789Z", "date": "2020-10-29T12:12:12.987Z" }' - '{ "index": { "_index": "test-doc-values", "_id": "3" } }' - '{ "some_keyword": "5", "byte": 122, "double": 102.0, "float": "802.0", "half_float": "402.0", "integer": 1292, "long": 13458, "short": 152, "unsigned_long": 10223372036854775802, "ip_field": "192.168.0.3", "boolean": false, "date_nanos": "2024-10-29T12:12:12.123456789Z", "date": "2024-10-29T12:12:12.987Z" }' + - '{ "index": { "_index": "test-doc-values", "_id": "4" } }' + - '{ "some_keyword": "Keyword1" }' + - '{ "index": { "_index": "test-doc-values", "_id": "5" } }' + - '{ "some_keyword": "keyword1" }' + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "Keyword1" + } } + + - match: { hits.total: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: test-doc-values + body: + query: + term: { + "some_keyword": { + "value": "keyword1", + "case_insensitive": "true" + } } + + - match: { hits.total: 2 } - do: search: diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index 13063a4761006..4fe821ff74d34 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ 
b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -15,7 +15,6 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; -import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FieldExistsQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; @@ -364,23 +363,17 @@ private KeywordFieldType valueFieldType() { return (mappedFieldTypeName == null) ? valueFieldType : valueAndPathFieldType; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + return valueFieldType().termQueryCaseInsensitive(rewriteValue(inputToString(value)), context); + } + /** * redirect queries with rewrite value to rewriteSearchValue and directSubFieldName */ @Override public Query termQuery(Object value, @Nullable QueryShardContext context) { - - String searchValueString = inputToString(value); - String directSubFieldName = directSubfield(); - String rewriteSearchValue = rewriteValue(searchValueString); - - failIfNotIndexed(); - Query query; - query = new TermQuery(new Term(directSubFieldName, indexedValueForSearch(rewriteSearchValue))); - if (boost() != 1f) { - query = new BoostQuery(query, boost()); - } - return query; + return valueFieldType().termQuery(rewriteValue(inputToString(value)), context); } @Override diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 90e43c818e137..4436e74c821c3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -39,6 +39,7 @@ import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MultiTermQuery; @@ -398,6 +399,46 @@ protected Object rewriteForDocValue(Object value) { return value; } + @Override + public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQueryCaseInsensitive(value, context); + } else { + BytesRef bytesRef = indexedValueForSearch(rewriteForDocValue(value)); + Term term = new Term(name(), bytesRef); + Query query = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString(bytesRef.utf8ToString(), Operations.DEFAULT_DETERMINIZE_WORK_LIMIT), + MultiTermQuery.DOC_VALUES_REWRITE + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + + @Override + public Query termQuery(Object value, QueryShardContext context) { + failIfNotIndexedAndNoDocValues(); + if (isSearchable()) { + return super.termQuery(value, context); + } else { + Query query = SortedSetDocValuesField.newSlowRangeQuery( + name(), + indexedValueForSearch(rewriteForDocValue(value)), + indexedValueForSearch(rewriteForDocValue(value)), + true, + true + ); + if (boost() != 1f) { + query = new BoostQuery(query, boost()); + } + return query; + } + } + @Override public Query termsQuery(List values, QueryShardContext context) { failIfNotIndexedAndNoDocValues(); diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java 
b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java index 38a6f13777f00..4160108342534 100644 --- a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -9,6 +9,7 @@ package org.opensearch.index.mapper; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.Term; import org.apache.lucene.search.FieldExistsQuery; @@ -24,6 +25,7 @@ import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; import org.opensearch.index.analysis.NamedAnalyzer; @@ -138,39 +140,273 @@ public void testRewriteValue() { assertEquals("field.bar=foo", searchValuesDocPath); } - public void testTermQuery() { + public void testTermQueryCaseInsensitive() { - FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._valueAndPath", "field.bar=fOo")), + dynamicMappedFieldType.termQueryCaseInsensitive("fOo", null) + ); + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + assertEquals( + AutomatonQueries.caseInsensitiveTermQuery(new Term("field._value", "fOo")), + ft.termQueryCaseInsensitive("fOo", null) + ); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("fOo"))); + + assertEquals(expected, ft.termQuery("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + 
Query expected = AutomatonQueries.createAutomatonQuery( + new Term("field" + VALUE_AND_PATH_SUFFIX, "field.fOo"), + AutomatonQueries.toCaseInsensitiveString("field.fOo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + + assertEquals(expected, ft.termQueryCaseInsensitive("fOo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, - true, - true + false, + false, + null, + Collections.emptyMap() + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); + } - // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field - String searchFieldName = (flatParentFieldType).directSubfield(); - String searchValues = (flatParentFieldType).rewriteValue("foo"); - assertEquals("foo", searchValues); - assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + public void testTermQuery() { - MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( - "field.bar", - flatParentFieldType.name(), - flatParentFieldType.getValueFieldType(), - flatParentFieldType.getValueAndPathFieldType() - ); + // 1.test isSearchable=true, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType flatParentFieldType = + (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType("field", null, true, true); - // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field - String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); - String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); - assertEquals("field.bar=foo", searchValuesDocPath); - assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null)); + // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field + String searchFieldName = (flatParentFieldType).directSubfield(); + String 
searchValues = (flatParentFieldType).rewriteValue("foo"); + assertEquals("foo", searchValues); + assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType( + "field.bar", + flatParentFieldType.name(), + flatParentFieldType.getValueFieldType(), + flatParentFieldType.getValueAndPathFieldType() + ); + + // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); + assertEquals("field.bar=foo", searchValuesDocPath); + assertEquals( + new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), + dynamicMappedFieldType.termQuery("foo", null) + ); + + } + + // 2.test isSearchable=true, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_SUFFIX, new BytesRef("foo"))); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // test isSearchable=true, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + true, + false + ); + Query expected = new TermQuery(new Term("field" + VALUE_AND_PATH_SUFFIX, new BytesRef("foo"))); + + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 3.test isSearchable=false, hasDocValues=true, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + } + + // test isSearchable=false, hasDocValues=true, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + true + ); + Query expected = SortedSetDocValuesField.newSlowRangeQuery( + "field" + VALUE_AND_PATH_SUFFIX, + new BytesRef("field.foo"), + new BytesRef("field.foo"), + true, + true + ); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + } + + // 4.test isSearchable=false, hasDocValues=false, mappedFieldTypeName=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = (FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + null, + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + + // test isSearchable=false, hasDocValues=false, mappedFieldTypeName!=null + { + FlatObjectFieldMapper.FlatObjectFieldType ft = 
(FlatObjectFieldMapper.FlatObjectFieldType) getFlatParentFieldType( + "field", + "field", + false, + false + ); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field._valueAndPath] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType( "field", null, false, - true, + false, null, Collections.emptyMap() ); @@ -178,7 +414,10 @@ public void testTermQuery() { IllegalArgumentException.class, () -> unsearchable.termQuery("bar", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) ); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + assertEquals( + "Cannot search on field [field._value] since it is both not indexed, and does not have doc_values enabled.", + e.getMessage() + ); } public void testExistsQuery() { diff --git a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java index f291b864beb59..d52426c67d256 100644 --- a/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/KeywordFieldTypeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.Term; import org.apache.lucene.search.DocValuesFieldExistsQuery; import org.apache.lucene.search.FuzzyQuery; @@ -60,6 +61,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; import org.opensearch.index.analysis.AnalyzerScope; @@ -100,13 +102,52 @@ public void testIsFieldWithinQuery() throws IOException { ); } + public void testTermQueryCaseInsensitive() { + MappedFieldType ft = new KeywordFieldType("field"); + Query expected = AutomatonQueries.caseInsensitiveTermQuery(new Term("field", BytesRefs.toBytesRef("foo"))); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Term term = new Term("field", "foo"); + + expected = AutomatonQueries.createAutomatonQuery( + term, + AutomatonQueries.toCaseInsensitiveString("foo", Integer.MAX_VALUE), + MultiTermQuery.DOC_VALUES_REWRITE + ); + assertEquals(expected, ft.termQueryCaseInsensitive("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQueryCaseInsensitive("foo", null)); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); + } + public void testTermQuery() { MappedFieldType ft = new KeywordFieldType("field"); 
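As an aside (illustrative only, not part of the patch): the assertions that follow exercise the new doc_values fallback in KeywordFieldType#termQuery — an indexed field still gets a plain TermQuery, while an index:false field with doc_values gets a "slow" doc-values range query with identical bounds. A minimal, self-contained sketch of that decision, using only the Lucene calls already shown in this patch; the helper name and boolean flags are illustrative stand-ins, not OpenSearch API:

    import org.apache.lucene.document.SortedSetDocValuesField;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;
    import org.apache.lucene.util.BytesRef;

    class TermQueryFallbackSketch {
        // isSearchable/hasDocValues stand in for the mapper's field-type flags.
        static Query chooseTermQuery(String field, BytesRef value, boolean isSearchable, boolean hasDocValues) {
            if (isSearchable) {
                // Inverted index is present: an ordinary term query.
                return new TermQuery(new Term(field, value));
            }
            if (hasDocValues) {
                // doc_values-only field: a range query with equal bounds behaves like a term query.
                return SortedSetDocValuesField.newSlowRangeQuery(field, value, value, true, true);
            }
            throw new IllegalArgumentException(
                "Cannot search on field [" + field + "] since it is both not indexed, and does not have doc_values enabled."
            );
        }
    }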
- assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", null)); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + ft = new KeywordFieldType("field", true, false, Collections.emptyMap()); + assertEquals(new TermQuery(new Term("field", "foo")), ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); - MappedFieldType unsearchable = new KeywordFieldType("field", false, true, Collections.emptyMap()); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); - assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + ft = new KeywordFieldType("field", false, true, Collections.emptyMap()); + Query expected = SortedSetDocValuesField.newSlowRangeQuery("field", new BytesRef("foo"), new BytesRef("foo"), true, true); + assertEquals(expected, ft.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES)); + + MappedFieldType unsearchable = new KeywordFieldType("field", false, false, Collections.emptyMap()); + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> unsearchable.termQuery("foo", MOCK_QSC_ENABLE_INDEX_DOC_VALUES) + ); + assertEquals( + "Cannot search on field [field] since it is both not indexed, and does not have doc_values " + "enabled.", + e.getMessage() + ); } public void testTermQueryWithNormalizer() { From bbcbd216200c2d71e32f4fe062c6e040d907659d Mon Sep 17 00:00:00 2001 From: panguixin Date: Fri, 10 Jan 2025 09:53:11 +0800 Subject: [PATCH 16/37] use the correct type to widen the sort fields when merging top docs (#16881) * use the correct type to widen the sort fields when merging top docs Signed-off-by: panguixin * fix Signed-off-by: panguixin * apply commments Signed-off-by: panguixin * changelog Signed-off-by: panguixin * add more tests Signed-off-by: panguixin --------- Signed-off-by: panguixin --- CHANGELOG.md | 1 + .../opensearch/search/sort/FieldSortIT.java | 99 +++++++++++++++++++ .../action/search/SearchPhaseController.java | 54 ++++++---- .../sort/SortedWiderNumericSortField.java | 21 +++- 4 files changed, 152 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0cb11d1c45d38..9aabbbf75f00c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -66,6 +66,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) +- Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881)) ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) diff --git a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java index fdb12639c65be..cc837019d0b42 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/sort/FieldSortIT.java @@ -49,6 +49,7 @@ import 
org.opensearch.common.Numbers; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -63,6 +64,7 @@ import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.ParameterizedDynamicSettingsOpenSearchIntegTestCase; import org.hamcrest.Matchers; @@ -82,7 +84,9 @@ import java.util.Set; import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; +import java.util.function.Supplier; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.functionScoreQuery; @@ -2609,4 +2613,99 @@ public void testSimpleSortsPoints() throws Exception { assertThat(searchResponse.toString(), not(containsString("error"))); } + + public void testSortMixedIntegerNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + AtomicInteger counter = new AtomicInteger(); + index("long", () -> Long.MAX_VALUE - counter.getAndIncrement()); + index("integer", () -> Integer.MAX_VALUE - counter.getAndIncrement()); + SearchResponse searchResponse = client().prepareSearch("long", "integer") + .setQuery(matchAllQuery()) + .setSize(10) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + long[] sortValues = new long[10]; + for (int i = 0; i < 10; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).longValue(); + } + for (int i = 1; i < 10; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThan(sortValues[i])); + } + } + + public void testSortMixedFloatingNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + AtomicInteger counter = new AtomicInteger(); + index("double", () -> 100.5 - counter.getAndIncrement()); + counter.set(0); + index("float", () -> 200.5 - counter.getAndIncrement()); + counter.set(0); + index("half_float", () -> 300.5 - counter.getAndIncrement()); + SearchResponse searchResponse = client().prepareSearch("double", "float", "half_float") + .setQuery(matchAllQuery()) + .setSize(15) + .addSort(SortBuilders.fieldSort("field").order(SortOrder.ASC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + double[] sortValues = new double[15]; + for (int i = 0; i < 15; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(); + } + for (int i = 1; i < 15; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThan(sortValues[i])); + } + } + + public void testSortMixedFloatingAndIntegerNumericFields() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(3); + index("long", () -> randomLongBetween(0, (long) 2E53 - 1)); + index("integer", OpenSearchTestCase::randomInt); + index("double", OpenSearchTestCase::randomDouble); + index("float", () -> randomFloat()); + boolean asc = randomBoolean(); + SearchResponse searchResponse = client().prepareSearch("long", "integer", "double", "float") + .setQuery(matchAllQuery()) + .setSize(20) + 
.addSort(SortBuilders.fieldSort("field").order(asc ? SortOrder.ASC : SortOrder.DESC).sortMode(SortMode.MAX)) + .get(); + assertNoFailures(searchResponse); + double[] sortValues = new double[20]; + for (int i = 0; i < 20; i++) { + sortValues[i] = ((Number) searchResponse.getHits().getAt(i).getSortValues()[0]).doubleValue(); + } + if (asc) { + for (int i = 1; i < 20; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], lessThanOrEqualTo(sortValues[i])); + } + } else { + for (int i = 1; i < 20; i++) { + assertThat(Arrays.toString(sortValues), sortValues[i - 1], greaterThanOrEqualTo(sortValues[i])); + } + } + } + + private void index(String type, Supplier valueSupplier) throws Exception { + assertAcked( + prepareCreate(type).setMapping( + XContentFactory.jsonBuilder() + .startObject() + .startObject("properties") + .startObject("field") + .field("type", type) + .endObject() + .endObject() + .endObject() + ).setSettings(Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 0)) + ); + ensureGreen(type); + for (int i = 0; i < 5; i++) { + client().prepareIndex(type) + .setId(Integer.toString(i)) + .setSource("{\"field\" : " + valueSupplier.get() + " }", XContentType.JSON) + .get(); + } + client().admin().indices().prepareRefresh(type).get(); + } + } diff --git a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java index 161a103cdf36a..d63695447e365 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/opensearch/action/search/SearchPhaseController.java @@ -48,6 +48,7 @@ import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.search.DocValueFormat; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -604,36 +605,51 @@ private static void validateMergeSortValueFormats(Collection comparator; + /** * Creates a sort, possibly in reverse, specifying how the sort value from the document's set is * selected. 
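As an aside (illustrative only, not part of the patch): the constructor hunk below is what "use the correct type to widen the sort fields" means in practice — the removed code always compared merged sort values as doubles, while the new code keeps a long comparator and Long.BYTES for Type.LONG, because a double cannot represent every long above 2^53. A small standalone illustration of the precision loss the long path avoids, in plain Java with no OpenSearch types:

    import java.util.Comparator;

    public class WidenPrecisionSketch {
        public static void main(String[] args) {
            long a = Long.MAX_VALUE;        // 9223372036854775807
            long b = Long.MAX_VALUE - 1;    // 9223372036854775806
            // Widening to double collapses both values to 2^63, so they compare equal.
            System.out.println(Double.compare((double) a, (double) b));   // 0
            // Comparing as longs, as the new LONG branch does, preserves the order.
            Comparator<Number> asLongs = Comparator.comparingLong(Number::longValue);
            System.out.println(asLongs.compare(a, b));                    // 1
        }
    }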
@@ -39,6 +43,15 @@ public class SortedWiderNumericSortField extends SortedNumericSortField { */ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { super(field, type, reverse); + if (type == Type.LONG) { + byteCounts = Long.BYTES; + comparator = Comparator.comparingLong(Number::longValue); + } else if (type == Type.DOUBLE) { + byteCounts = Double.BYTES; + comparator = Comparator.comparingDouble(Number::doubleValue); + } else { + throw new IllegalArgumentException("Unsupported numeric type: " + type); + } } /** @@ -51,7 +64,7 @@ public SortedWiderNumericSortField(String field, Type type, boolean reverse) { */ @Override public FieldComparator getComparator(int numHits, Pruning pruning) { - return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), pruning, Double.BYTES) { + return new NumericComparator(getField(), (Number) getMissingValue(), getReverse(), pruning, byteCounts) { @Override public int compare(int slot1, int slot2) { throw new UnsupportedOperationException(); @@ -78,7 +91,7 @@ public int compareValues(Number first, Number second) { } else if (second == null) { return 1; } else { - return Double.compare(first.doubleValue(), second.doubleValue()); + return comparator.compare(first, second); } } }; From f6dc4a691d4bdf9b2d85e84c14a9c5c2e61e2460 Mon Sep 17 00:00:00 2001 From: panguixin Date: Fri, 10 Jan 2025 20:43:56 +0800 Subject: [PATCH 17/37] Fix multi-value sort for unsigned long (#16732) * Fix multi-value sort for unsigned long Signed-off-by: panguixin * Add initial rest-api-spec tests Signed-off-by: Andriy Redko * add more rest tests Signed-off-by: panguixin * fix Signed-off-by: panguixin * fix Signed-off-by: panguixin * Extend MultiValueMode with dedicated support of unsigned_long doc values Signed-off-by: Andriy Redko * Add CHANGELOG.md, minor cleanups Signed-off-by: Andriy Redko * Correct the license headers Signed-off-by: Andriy Redko * Correct the @PublicApi version Signed-off-by: Andriy Redko * Replace SingletonSortedNumericUnsignedLongValues with LongToSortedNumericUnsignedLongValues (as per review comments) Signed-off-by: Andriy Redko --------- Signed-off-by: panguixin Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../test/search/260_sort_double.yml | 136 +++++++ .../test/search/260_sort_long.yml | 137 +++++++ .../test/search/260_sort_unsigned_long.yml | 167 +++++++++ ...LongToSortedNumericUnsignedLongValues.java | 55 +++ .../SortedNumericUnsignedLongValues.java | 62 ++++ .../UnsignedLongValuesComparatorSource.java | 10 +- .../org/opensearch/search/MultiValueMode.java | 333 ++++++++++++++++++ .../search/MultiValueModeTests.java | 230 ++++++++++++ 9 files changed, 1126 insertions(+), 5 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml create mode 100644 server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java create mode 100644 server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9aabbbf75f00c..512ba48941c87 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix remote shards balance 
([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) +- Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) ### Security diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml new file mode 100644 index 0000000000000..eccafaf96dd23 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_double.yml @@ -0,0 +1,136 @@ +setup: + - do: + indices.create: + index: double_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + field: + type: double + +--- +"test sorting against double only fields": + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "double_sort", "_id" : "1" } }' + - '{"field" : [ 900719925474099.1, 1.1 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "2" } }' + - '{"field" : [ 900719925474099.2, 900719925474099.3 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "3" } }' + - '{"field" : [ 450359962737049.4, 3.5, 4.6 ] }' + - '{ "index" : { "_index" : "double_sort", "_id" : "4" } }' + - '{"field" : [ 450359962737049.7, 5.8, -1.9, -2.0 ] }' + + - do: + search: + index: double_sort + body: + size: 5 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 4 } + - length: {hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 900719925474099.2 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 900719925474099.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.2.sort.0: 450359962737049.7 } + - match: { hits.hits.3._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.3.sort.0: 450359962737049.4 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: max, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.0.sort.0: 450359962737049.4 } + - match: { hits.hits.1._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.1.sort.0: 450359962737049.7 } + - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.2.sort.0: 900719925474099.1 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: min, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.0.sort: [ -2.0 ] } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 1.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { 
hits.hits.2.sort.0: 3.5 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: median, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 900719925474099.2 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 450359962737050.1 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.2.sort.0: 4.6 } + - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.3.sort.0: 1.95 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: avg, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.0.sort.0: 112589990684262.89 } + - match: { hits.hits.1._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.1.sort.0: 150119987579019.16 } + - match: { hits.hits.2._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.2.sort.0: 450359962737050.1 } + - match: { hits.hits.3._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.3.sort.0: 900719925474099.2 } + + - do: + search: + index: double_sort + body: + size: 5 + sort: [ { field: { mode: sum, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: double_sort } + - match: { hits.hits.0._source.field: [ 900719925474099.2, 900719925474099.2 ] } + - match: { hits.hits.0.sort.0: 1801439850948198.5 } + - match: { hits.hits.1._source.field: [ 900719925474099.1, 1.1 ] } + - match: { hits.hits.1.sort.0: 900719925474100.2 } + - match: { hits.hits.2._source.field: [ 450359962737049.4, 3.5, 4.6 ] } + - match: { hits.hits.2.sort.0: 450359962737057.5 } + - match: { hits.hits.3._source.field: [ 450359962737049.7, 5.8, -1.9, -2.0 ] } + - match: { hits.hits.3.sort.0: 450359962737051.56 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml new file mode 100644 index 0000000000000..f354dff6cbf02 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_long.yml @@ -0,0 +1,137 @@ +setup: + - do: + indices.create: + index: long_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + field: + type: long + +--- +"test sorting against long only fields": + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "long_sort", "_id" : "1" } }' + - '{"field" : [ 9223372036854775807, 1 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "2" } }' + - '{"field" : [ 922337203685477777, 2 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "3" } }' + - '{"field" : [ 2147483647, 3, 4 ] }' + - '{ "index" : { "_index" : "long_sort", "_id" : "4" } }' + - '{"field" : [ 2147483648, 5, -1, -2 ] }' + + - do: + search: + index: long_sort + body: + size: 5 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 4 } + - length: {hits.hits: 4 } + - match: { 
hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.0.sort.0: 9223372036854775807 } + - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.1.sort.0: 922337203685477777 } + - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.2.sort.0: 2147483648 } + - match: { hits.hits.3._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.3.sort.0: 2147483647 } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: max, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.0.sort.0: 2147483647 } + - match: { hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.1.sort.0: 2147483648 } + - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.2.sort.0: 922337203685477777 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort.0: 9223372036854775807 } + + + - do: + search: + index: long_sort + body: + size: 5 + sort: [{ field: { mode: min, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.0.sort.0: 3 } + - match: { hits.hits.1._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.1.sort.0: 2 } + - match: { hits.hits.2._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.2.sort.0: 1 } + - match: { hits.hits.3._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.3.sort: [ -2 ] } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: median, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.0.sort.0: 2 } + - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.1.sort.0: 4 } + - match: { hits.hits.2._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.2.sort.0: 461168601842738880 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort.0: 4611686018427387904 } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: avg, order: desc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.0.sort.0: 461168601842738880 } + - match: { hits.hits.1._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.1.sort.0: 715827885 } + - match: { hits.hits.2._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.2.sort.0: 536870913 } + - match: { hits.hits.3._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.3.sort: [ -4611686018427387904 ] } + + - do: + search: + index: long_sort + body: + size: 5 + sort: [ { field: { mode: sum, order: asc } } ] + - match: { hits.total.value: 4 } + - length: { hits.hits: 4 } + - match: { hits.hits.0._index: long_sort } + - match: { hits.hits.0._source.field: [ 9223372036854775807, 1 ] } + - match: { hits.hits.0.sort: [ -9223372036854775808 ] } + - match: { 
hits.hits.1._source.field: [ 2147483648, 5, -1, -2 ] } + - match: { hits.hits.1.sort.0: 2147483650 } + - match: { hits.hits.2._source.field: [ 2147483647, 3, 4 ] } + - match: { hits.hits.2.sort.0: 2147483654 } + - match: { hits.hits.3._source.field: [ 922337203685477777, 2 ] } + - match: { hits.hits.3.sort.0: 922337203685477779 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml new file mode 100644 index 0000000000000..056b2f58b2229 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/260_sort_unsigned_long.yml @@ -0,0 +1,167 @@ +setup: + - do: + indices.create: + index: unsigned_long_sort + body: + settings: + number_of_shards: 3 + number_of_replicas: 0 + mappings: + properties: + field: + type: unsigned_long + +--- +"test sorting against unsigned_long only fields": + - skip: + version: " - 2.19.99" + reason: "this change is added in 3.0.0" + + - do: + bulk: + refresh: true + body: + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "1" } }' + - '{"field" : [ 13835058055282163712, 1 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "2" } }' + - '{"field" : [ 13835058055282163713, 13835058055282163714 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "3" } }' + - '{"field" : [ 13835058055282163715, 13835058055282163716, 2 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "4" } }' + - '{"field" : [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "5" } }' + - '{"field" : [ 13835058055282163720, 13835058055282163721, 3, 4 ] }' + - '{ "index" : { "_index" : "unsigned_long_sort", "_id" : "6" } }' + - '{"field" : [ 13835058055282163722, 5, 6, 7 ] }' + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [{ field: { mode: max, order: desc } } ] + - match: {hits.total.value: 6 } + - length: {hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 13835058055282163722 } + - match: { hits.hits.1._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.1.sort.0: 13835058055282163721 } + - match: { hits.hits.2._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.2.sort.0: 13835058055282163719 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 13835058055282163716 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 13835058055282163714 } + - match: { hits.hits.5._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.5.sort.0: 13835058055282163712 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [{ field: { mode: max, order: asc } } ] + - match: {hits.total.value: 6 } + - length: {hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 13835058055282163712 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 13835058055282163714 } + - match: { hits.hits.2._source.field: [ 13835058055282163715, 
13835058055282163716, 2 ] } + - match: { hits.hits.2.sort.0: 13835058055282163716 } + - match: { hits.hits.3._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.3.sort.0: 13835058055282163719 } + - match: { hits.hits.4._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.4.sort.0: 13835058055282163721 } + - match: { hits.hits.5._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.5.sort.0: 13835058055282163722 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: median, order: asc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 7 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 4611686018427387906 } + - match: { hits.hits.2._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.2.sort.0: 6917529027641081857 } + - match: { hits.hits.3._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.3.sort.0: 6917529027641081862 } + - match: { hits.hits.4._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.4.sort.0: 13835058055282163715 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 13835058055282163718 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: sum, order: desc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.0.sort.0: 13835058055282163740 } + - match: { hits.hits.1._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.1.sort.0: 13835058055282163713 } + - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.2.sort.0: 9223372036854775832 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 9223372036854775817 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 9223372036854775811 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 4611686018427387922 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: avg, order: desc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 6917529027641081857 } + - match: { hits.hits.1._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.1.sort.0: 4611686018427387906 } + - match: { hits.hits.2._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.2.sort.0: 3458764513820540935 } + - match: { hits.hits.3._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.3.sort.0: 3074457345618258606 } + - match: { hits.hits.4._source.field: [ 
13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.4.sort.0: 2305843009213693958 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 1537228672809129307 } + + - do: + search: + index: unsigned_long_sort + body: + size: 10 + sort: [ { field: { mode: min, order: asc } } ] + - match: { hits.total.value: 6 } + - length: { hits.hits: 6 } + - match: { hits.hits.0._index: unsigned_long_sort } + - match: { hits.hits.0._source.field: [ 13835058055282163712, 1 ] } + - match: { hits.hits.0.sort.0: 1 } + - match: { hits.hits.1._source.field: [ 13835058055282163715, 13835058055282163716, 2 ] } + - match: { hits.hits.1.sort.0: 2 } + - match: { hits.hits.2._source.field: [ 13835058055282163720, 13835058055282163721, 3, 4 ] } + - match: { hits.hits.2.sort.0: 3 } + - match: { hits.hits.3._source.field: [ 13835058055282163722, 5, 6, 7 ] } + - match: { hits.hits.3.sort.0: 5 } + - match: { hits.hits.4._source.field: [ 13835058055282163713, 13835058055282163714 ] } + - match: { hits.hits.4.sort.0: 13835058055282163713 } + - match: { hits.hits.5._source.field: [ 13835058055282163717, 13835058055282163718, 13835058055282163719 ] } + - match: { hits.hits.5.sort.0: 13835058055282163717 } diff --git a/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java new file mode 100644 index 0000000000000..eb8d8f1667218 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/LongToSortedNumericUnsignedLongValues.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata; + +import org.apache.lucene.index.SortedNumericDocValues; + +import java.io.IOException; + +/** + * Wraps long-based {@link SortedNumericDocValues} as unsigned long ones + * (primarily used by {@link org.opensearch.search.MultiValueMode} + * + * @opensearch.internal + */ +public final class LongToSortedNumericUnsignedLongValues extends SortedNumericUnsignedLongValues { + private final SortedNumericDocValues values; + + public LongToSortedNumericUnsignedLongValues(SortedNumericDocValues values) { + this.values = values; + } + + @Override + public boolean advanceExact(int target) throws IOException { + return values.advanceExact(target); + } + + @Override + public long nextValue() throws IOException { + return values.nextValue(); + } + + @Override + public int docValueCount() { + return values.docValueCount(); + } + + public int advance(int target) throws IOException { + return values.advance(target); + } + + public int docID() { + return values.docID(); + } + + /** Return the wrapped values. 
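As a worked example (illustrative only, not part of the patch): the avg expectations in 260_sort_unsigned_long.yml above follow from unsigned arithmetic on raw long bits — the per-document sum may wrap past Long.MAX_VALUE, so it is divided unsigned and rounded up (the patch's divideUnsignedAndRoundUp). For the document with field [13835058055282163712, 1]:

    public class UnsignedAvgSketch {
        public static void main(String[] args) {
            long a = Long.parseUnsignedLong("13835058055282163712");
            long total = a + 1L;                              // wraps negative as a signed long; the bits stay correct
            long quotient = Long.divideUnsigned(total, 2);
            long remainder = Long.remainderUnsigned(total, 2);
            long avg = quotient + (remainder != 0 ? 1 : 0);   // round up
            System.out.println(Long.toUnsignedString(avg));   // 6917529027641081857, the expected sort value
        }
    }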
*/ + public SortedNumericDocValues getNumericUnsignedLongValues() { + return values; + } +} diff --git a/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java new file mode 100644 index 0000000000000..fa4c5152b9f90 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/fielddata/SortedNumericUnsignedLongValues.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.fielddata; + +import org.apache.lucene.index.SortedNumericDocValues; +import org.opensearch.common.annotation.PublicApi; + +import java.io.IOException; + +/** + * Clone of {@link SortedNumericDocValues} for unsigned long values. + * + * @opensearch.api + */ +@PublicApi(since = "2.19.0") +public abstract class SortedNumericUnsignedLongValues { + + /** Sole constructor. (For invocation by subclass + * constructors, typically implicit.) */ + protected SortedNumericUnsignedLongValues() {} + + /** Advance the iterator to exactly {@code target} and return whether + * {@code target} has a value. + * {@code target} must be greater than or equal to the current + * doc ID and must be a valid doc ID, ie. ≥ 0 and + * < {@code maxDoc}.*/ + public abstract boolean advanceExact(int target) throws IOException; + + /** + * Iterates to the next value in the current document. Do not call this more than + * {@link #docValueCount} times for the document. + */ + public abstract long nextValue() throws IOException; + + /** + * Retrieves the number of values for the current document. This must always + * be greater than zero. + * It is illegal to call this method after {@link #advanceExact(int)} + * returned {@code false}. + */ + public abstract int docValueCount(); + + /** + * Advances to the first beyond the current whose document number is greater than or equal to + * target, and returns the document number itself. Exhausts the iterator and returns {@link + * org.apache.lucene.search.DocIdSetIterator#NO_MORE_DOCS} if target is greater than the highest document number in the set. + * + * This method is being used by {@link org.apache.lucene.search.comparators.NumericComparator.NumericLeafComparator} when point values optimization kicks + * in and is implemented by most numeric types. 
+ */ + public int advance(int target) throws IOException { + throw new UnsupportedOperationException(); + } + + public abstract int docID(); +} diff --git a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java index 9db5817450cd0..6fc85bd0b2689 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java +++ b/server/src/main/java/org/opensearch/index/fielddata/fieldcomparator/UnsignedLongValuesComparatorSource.java @@ -10,7 +10,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.LeafFieldComparator; @@ -24,6 +23,8 @@ import org.opensearch.index.fielddata.IndexFieldData; import org.opensearch.index.fielddata.IndexNumericFieldData; import org.opensearch.index.fielddata.LeafNumericFieldData; +import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues; +import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import org.opensearch.index.search.comparators.UnsignedLongComparator; import org.opensearch.search.DocValueFormat; import org.opensearch.search.MultiValueMode; @@ -57,14 +58,13 @@ public SortField.Type reducedType() { return SortField.Type.LONG; } - private SortedNumericDocValues loadDocValues(LeafReaderContext context) { + private SortedNumericUnsignedLongValues loadDocValues(LeafReaderContext context) { final LeafNumericFieldData data = indexFieldData.load(context); - SortedNumericDocValues values = data.getLongValues(); - return values; + return new LongToSortedNumericUnsignedLongValues(data.getLongValues()); } private NumericDocValues getNumericDocValues(LeafReaderContext context, BigInteger missingValue) throws IOException { - final SortedNumericDocValues values = loadDocValues(context); + final SortedNumericUnsignedLongValues values = loadDocValues(context); if (nested == null) { return FieldData.replaceMissing(sortMode.select(values), missingValue); } diff --git a/server/src/main/java/org/opensearch/search/MultiValueMode.java b/server/src/main/java/org/opensearch/search/MultiValueMode.java index a99da674836f2..fa2e776eca67a 100644 --- a/server/src/main/java/org/opensearch/search/MultiValueMode.java +++ b/server/src/main/java/org/opensearch/search/MultiValueMode.java @@ -42,6 +42,7 @@ import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; +import org.opensearch.common.Numbers; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -50,9 +51,11 @@ import org.opensearch.index.fielddata.AbstractNumericDocValues; import org.opensearch.index.fielddata.AbstractSortedDocValues; import org.opensearch.index.fielddata.FieldData; +import org.opensearch.index.fielddata.LongToSortedNumericUnsignedLongValues; import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import java.io.IOException; import java.util.Locale; @@ -143,6 
+146,44 @@ protected double pick( return totalCount > 0 ? totalValue : missingValue; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long total = 0; + for (int index = 0; index < count; ++index) { + total += values.nextValue(); + } + return total; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + int totalCount = 0; + long totalValue = 0; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + + final int docCount = values.docValueCount(); + for (int index = 0; index < docCount; ++index) { + totalValue += values.nextValue(); + } + totalCount += docCount; + } + } + return totalCount > 0 ? totalValue : missingValue; + } }, /** @@ -228,6 +269,46 @@ protected double pick( } return totalValue / totalCount; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long total = 0; + for (int index = 0; index < count; ++index) { + total += values.nextValue(); + } + return count > 1 ? divideUnsignedAndRoundUp(total, count) : total; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + int totalCount = 0; + long totalValue = 0; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final int docCount = values.docValueCount(); + for (int index = 0; index < docCount; ++index) { + totalValue += values.nextValue(); + } + totalCount += docCount; + } + } + if (totalCount < 1) { + return missingValue; + } + return totalCount > 1 ? divideUnsignedAndRoundUp(totalValue, totalCount) : totalValue; + } }, /** @@ -259,6 +340,45 @@ protected double pick(SortedNumericDoubleValues values) throws IOException { return values.nextValue(); } } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + int count = values.docValueCount(); + long firstValue = values.nextValue(); + if (count == 1) { + return firstValue; + } else if (count == 2) { + long total = firstValue + values.nextValue(); + return (total >>> 1) + (total & 1); + } else if (firstValue >= 0) { + for (int i = 1; i < (count - 1) / 2; ++i) { + values.nextValue(); + } + if (count % 2 == 0) { + long total = values.nextValue() + values.nextValue(); + return (total >>> 1) + (total & 1); + } else { + return values.nextValue(); + } + } + + final long[] docValues = new long[count]; + docValues[0] = firstValue; + int firstPositiveIndex = 0; + for (int i = 1; i < count; ++i) { + docValues[i] = values.nextValue(); + if (docValues[i] >= 0 && firstPositiveIndex == 0) { + firstPositiveIndex = i; + } + } + final int mid = ((count - 1) / 2 + firstPositiveIndex) % count; + if (count % 2 == 0) { + long total = docValues[mid] + docValues[(mid + 1) % count]; + return (total >>> 1) + (total & 1); + } else { + return docValues[mid]; + } + } }, /** @@ -382,6 +502,47 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc return hasValue ? 
ord : -1; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + final long min = values.nextValue(); + if (count == 1 || min > 0) { + return min; + } + for (int i = 1; i < count; ++i) { + long val = values.nextValue(); + if (val >= 0) { + return val; + } + } + return min; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + boolean hasValue = false; + long minValue = Numbers.MAX_UNSIGNED_LONG_VALUE_AS_LONG; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final long docMin = pick(values); + minValue = Long.compareUnsigned(docMin, minValue) < 0 ? docMin : minValue; + hasValue = true; + } + } + return hasValue ? minValue : missingValue; + } }, /** @@ -525,6 +686,46 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc } return ord; } + + @Override + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + final int count = values.docValueCount(); + long max = values.nextValue(); + long val; + for (int i = 1; i < count; ++i) { + val = values.nextValue(); + if (max < 0 && val >= 0) { + return max; + } + max = val; + } + return max; + } + + @Override + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + boolean hasValue = false; + long maxValue = Numbers.MIN_UNSIGNED_LONG_VALUE_AS_LONG; + int count = 0; + for (int doc = startDoc; doc < endDoc; doc = docItr.nextDoc()) { + if (values.advanceExact(doc)) { + if (++count > maxChildren) { + break; + } + final long docMax = pick(values); + maxValue = Long.compareUnsigned(maxValue, docMax) < 0 ? docMax : maxValue; + hasValue = true; + } + } + return hasValue ? maxValue : missingValue; + } }; /** @@ -1032,6 +1233,126 @@ protected int pick(SortedDocValues values, DocIdSetIterator docItr, int startDoc throw new IllegalArgumentException("Unsupported sort mode: " + this); } + /** + * Return a {@link NumericDoubleValues} instance that can be used to sort documents + * with this mode and the provided values. When a document has no value, + * missingValue is returned. + *
<p>
+ * Allowed Modes: SUM, AVG, MEDIAN, MIN, MAX + */ + public NumericDocValues select(final SortedNumericUnsignedLongValues values) { + SortedNumericDocValues sortedNumericDocValues = null; + if (values instanceof LongToSortedNumericUnsignedLongValues) { + sortedNumericDocValues = ((LongToSortedNumericUnsignedLongValues) values).getNumericUnsignedLongValues(); + } + + final NumericDocValues singleton = DocValues.unwrapSingleton(sortedNumericDocValues); + if (singleton != null) { + return singleton; + } else { + return new AbstractNumericDocValues() { + + private long value; + + @Override + public boolean advanceExact(int target) throws IOException { + if (values.advanceExact(target)) { + value = pick(values); + return true; + } + return false; + } + + @Override + public int docID() { + return values.docID(); + } + + @Override + public long longValue() throws IOException { + return value; + } + }; + } + } + + protected long pick(SortedNumericUnsignedLongValues values) throws IOException { + throw new IllegalArgumentException("Unsupported sort mode: " + this); + } + + /** + * Return a {@link SortedDocValues} instance that can be used to sort root documents + * with this mode, the provided values and filters for root/inner documents. + *
<p>
+ * For every root document, the values of its inner documents will be aggregated. + *
<p>
+ * Allowed Modes: MIN, MAX + *
<p>
+ * NOTE: Calling the returned instance on docs that are not root docs is illegal + * The returned instance can only be evaluate the current and upcoming docs + */ + public NumericDocValues select( + final SortedNumericUnsignedLongValues values, + final long missingValue, + final BitSet parentDocs, + final DocIdSetIterator childDocs, + int maxDoc, + int maxChildren + ) throws IOException { + if (parentDocs == null || childDocs == null) { + return FieldData.replaceMissing(DocValues.emptyNumeric(), missingValue); + } + + return new AbstractNumericDocValues() { + + int lastSeenParentDoc = -1; + long lastEmittedValue = missingValue; + + @Override + public boolean advanceExact(int parentDoc) throws IOException { + assert parentDoc >= lastSeenParentDoc : "can only evaluate current and upcoming parent docs"; + if (parentDoc == lastSeenParentDoc) { + return true; + } else if (parentDoc == 0) { + lastEmittedValue = missingValue; + return true; + } + final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1); + final int firstChildDoc; + if (childDocs.docID() > prevParentDoc) { + firstChildDoc = childDocs.docID(); + } else { + firstChildDoc = childDocs.advance(prevParentDoc + 1); + } + + lastSeenParentDoc = parentDoc; + lastEmittedValue = pick(values, missingValue, childDocs, firstChildDoc, parentDoc, maxChildren); + return true; + } + + @Override + public int docID() { + return lastSeenParentDoc; + } + + @Override + public long longValue() { + return lastEmittedValue; + } + }; + } + + protected long pick( + SortedNumericUnsignedLongValues values, + long missingValue, + DocIdSetIterator docItr, + int startDoc, + int endDoc, + int maxChildren + ) throws IOException { + throw new IllegalArgumentException("Unsupported sort mode: " + this); + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeEnum(this); @@ -1040,4 +1361,16 @@ public void writeTo(StreamOutput out) throws IOException { public static MultiValueMode readMultiValueModeFrom(StreamInput in) throws IOException { return in.readEnum(MultiValueMode.class); } + + /** + * Copied from {@link Long#divideUnsigned(long, long)} and {@link Long#remainderUnsigned(long, long)} + */ + private static long divideUnsignedAndRoundUp(long dividend, long divisor) { + assert divisor > 0; + final long q = (dividend >>> 1) / divisor << 1; + final long r = dividend - q * divisor; + final long quotient = q + ((r | ~(r - divisor)) >>> (Long.SIZE - 1)); + final long rem = r - ((~(r - divisor) >> (Long.SIZE - 1)) & divisor); + return quotient + Math.round((double) rem / divisor); + } } diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java index 948d2cffceabe..e011dd0bcf6c0 100644 --- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.util.BitSetIterator; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.FixedBitSet; +import org.opensearch.common.Numbers; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.index.fielddata.AbstractBinaryDocValues; @@ -52,9 +53,13 @@ import org.opensearch.index.fielddata.NumericDoubleValues; import org.opensearch.index.fielddata.SortedBinaryDocValues; import org.opensearch.index.fielddata.SortedNumericDoubleValues; +import 
org.opensearch.index.fielddata.SortedNumericUnsignedLongValues; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.math.RoundingMode; import java.util.Arrays; import static org.hamcrest.Matchers.equalTo; @@ -776,6 +781,96 @@ public int docValueCount() { verifySortedSet(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); } + public void testSingleValuedUnsignedLongs() throws Exception { + final int numDocs = scaledRandomIntBetween(1, 100); + final long[] array = new long[numDocs]; + final FixedBitSet docsWithValue = randomBoolean() ? null : new FixedBitSet(numDocs); + for (int i = 0; i < array.length; ++i) { + if (randomBoolean()) { + array[i] = randomUnsignedLong().longValue(); + if (docsWithValue != null) { + docsWithValue.set(i); + } + } else if (docsWithValue != null && randomBoolean()) { + docsWithValue.set(i); + } + } + + final Supplier multiValues = () -> new SortedNumericUnsignedLongValues() { + int docId = -1; + + @Override + public boolean advanceExact(int target) throws IOException { + this.docId = target; + return docsWithValue == null || docsWithValue.get(docId); + } + + @Override + public int docID() { + return docId; + } + + @Override + public long nextValue() { + return array[docId]; + } + + @Override + public int docValueCount() { + return 1; + } + }; + verifySortedUnsignedLong(multiValues, numDocs); + final FixedBitSet rootDocs = randomRootDocs(numDocs); + final FixedBitSet innerDocs = randomInnerDocs(rootDocs); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); + } + + public void testMultiValuedUnsignedLongs() throws Exception { + final int numDocs = scaledRandomIntBetween(1, 100); + final long[][] array = new long[numDocs][]; + for (int i = 0; i < numDocs; ++i) { + final long[] values = new long[randomInt(4)]; + for (int j = 0; j < values.length; ++j) { + values[j] = randomUnsignedLong().longValue(); + } + Arrays.sort(values); + array[i] = values; + } + final Supplier multiValues = () -> new SortedNumericUnsignedLongValues() { + int doc; + int i; + + @Override + public long nextValue() { + return array[doc][i++]; + } + + @Override + public boolean advanceExact(int doc) { + this.doc = doc; + i = 0; + return array[doc].length > 0; + } + + @Override + public int docValueCount() { + return array[doc].length; + } + + @Override + public int docID() { + return doc; + } + }; + verifySortedUnsignedLong(multiValues, numDocs); + final FixedBitSet rootDocs = randomRootDocs(numDocs); + final FixedBitSet innerDocs = randomInnerDocs(rootDocs); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, Integer.MAX_VALUE); + verifySortedUnsignedLong(multiValues, numDocs, rootDocs, innerDocs, randomIntBetween(1, numDocs)); + } + private void verifySortedSet(Supplier supplier, int maxDoc) throws IOException { for (MultiValueMode mode : new MultiValueMode[] { MultiValueMode.MIN, MultiValueMode.MAX }) { SortedSetDocValues values = supplier.get(); @@ -857,6 +952,141 @@ private void verifySortedSet( } } + private void verifySortedUnsignedLong(Supplier supplier, int maxDoc) throws IOException { + for (MultiValueMode mode : MultiValueMode.values()) { + SortedNumericUnsignedLongValues values = supplier.get(); + final NumericDocValues selected = mode.select(values); + for (int i = 0; i < maxDoc; ++i) { + Long actual = null; + if 
(selected.advanceExact(i)) { + actual = selected.longValue(); + verifyLongValueCanCalledMoreThanOnce(selected, actual); + } + + BigInteger expected = null; + if (values.advanceExact(i)) { + int numValues = values.docValueCount(); + if (mode == MultiValueMode.MAX) { + expected = Numbers.MIN_UNSIGNED_LONG_VALUE; + } else if (mode == MultiValueMode.MIN) { + expected = Numbers.MAX_UNSIGNED_LONG_VALUE; + } else { + expected = BigInteger.ZERO; + } + for (int j = 0; j < numValues; ++j) { + if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) { + expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MIN) { + expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MAX) { + expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue())); + } + } + if (mode == MultiValueMode.AVG) { + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = numValues > 1 + ? new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger() + : expected; + } else if (mode == MultiValueMode.MEDIAN) { + final Long[] docValues = new Long[numValues]; + for (int j = 0; j < numValues; ++j) { + docValues[j] = values.nextValue(); + } + Arrays.sort(docValues, Long::compareUnsigned); + int value = numValues / 2; + if (numValues % 2 == 0) { + expected = Numbers.toUnsignedBigInteger(docValues[value - 1]) + .add(Numbers.toUnsignedBigInteger(docValues[value])); + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = new BigDecimal(expected).divide(new BigDecimal(2), RoundingMode.HALF_UP).toBigInteger(); + } else { + expected = Numbers.toUnsignedBigInteger(docValues[value]); + } + } + } + + final Long expectedLong = expected == null ? null : expected.longValue(); + assertEquals(mode.toString() + " docId=" + i, expectedLong, actual); + } + } + } + + private void verifySortedUnsignedLong( + Supplier supplier, + int maxDoc, + FixedBitSet rootDocs, + FixedBitSet innerDocs, + int maxChildren + ) throws IOException { + for (long missingValue : new long[] { 0, randomUnsignedLong().longValue() }) { + for (MultiValueMode mode : new MultiValueMode[] { + MultiValueMode.MIN, + MultiValueMode.MAX, + MultiValueMode.SUM, + MultiValueMode.AVG }) { + SortedNumericUnsignedLongValues values = supplier.get(); + final NumericDocValues selected = mode.select( + values, + missingValue, + rootDocs, + new BitSetIterator(innerDocs, 0L), + maxDoc, + maxChildren + ); + int prevRoot = -1; + for (int root = rootDocs.nextSetBit(0); root != -1; root = root + 1 < maxDoc ? 
rootDocs.nextSetBit(root + 1) : -1) { + assertTrue(selected.advanceExact(root)); + final long actual = selected.longValue(); + verifyLongValueCanCalledMoreThanOnce(selected, actual); + + BigInteger expected = BigInteger.ZERO; + if (mode == MultiValueMode.MAX) { + expected = Numbers.MIN_UNSIGNED_LONG_VALUE; + } else if (mode == MultiValueMode.MIN) { + expected = Numbers.MAX_UNSIGNED_LONG_VALUE; + } + int numValues = 0; + int count = 0; + for (int child = innerDocs.nextSetBit(prevRoot + 1); child != -1 && child < root; child = innerDocs.nextSetBit( + child + 1 + )) { + if (values.advanceExact(child)) { + if (++count > maxChildren) { + break; + } + for (int j = 0; j < values.docValueCount(); ++j) { + if (mode == MultiValueMode.SUM || mode == MultiValueMode.AVG) { + expected = expected.add(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MIN) { + expected = expected.min(Numbers.toUnsignedBigInteger(values.nextValue())); + } else if (mode == MultiValueMode.MAX) { + expected = expected.max(Numbers.toUnsignedBigInteger(values.nextValue())); + } + ++numValues; + } + } + } + final long expectedLong; + if (numValues == 0) { + expectedLong = missingValue; + } else if (mode == MultiValueMode.AVG) { + expected = Numbers.toUnsignedBigInteger(expected.longValue()); + expected = numValues > 1 + ? new BigDecimal(expected).divide(new BigDecimal(numValues), RoundingMode.HALF_UP).toBigInteger() + : expected; + expectedLong = expected.longValue(); + } else { + expectedLong = expected.longValue(); + } + + assertEquals(mode.toString() + " docId=" + root, expectedLong, actual); + + prevRoot = root; + } + } + } + } + public void testValidOrdinals() { assertThat(MultiValueMode.SUM.ordinal(), equalTo(0)); assertThat(MultiValueMode.AVG.ordinal(), equalTo(1)); From 9bb1fbe2d615602971cb786d06ff80ba377d1c7f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 10 Jan 2025 12:39:17 -0500 Subject: [PATCH 18/37] Update Gradle to 8.12 (#16884) Signed-off-by: Andriy Redko --- build.gradle | 8 +-- buildSrc/build.gradle | 2 +- .../org/opensearch/gradle/NoticeTask.groovy | 16 +++-- .../gradle/plugin/PluginBuildPlugin.groovy | 4 +- .../precommit/LicenseHeadersTask.groovy | 11 +++- .../opensearch/gradle/test/AntFixture.groovy | 11 +++- .../org/opensearch/gradle/EmptyDirTask.java | 9 ++- .../ExportOpenSearchBuildResourcesTask.java | 8 ++- .../org/opensearch/gradle/LoggedExec.java | 6 +- .../gradle/docker/DockerBuildTask.java | 13 ++-- .../precommit/DependencyLicensesTask.java | 17 ++++- .../gradle/precommit/FilePermissionsTask.java | 15 +++-- .../precommit/ForbiddenPatternsTask.java | 17 +++-- .../gradle/precommit/JarHellTask.java | 11 +++- .../gradle/precommit/LoggerUsageTask.java | 15 +++-- .../gradle/precommit/PomValidationTask.java | 11 +++- .../gradle/precommit/PrecommitTask.java | 11 +++- .../precommit/TestingConventionsTasks.java | 66 +++++++++---------- .../gradle/precommit/ThirdPartyAuditTask.java | 45 ++++++++----- .../test/ErrorReportingTestListener.java | 4 ++ .../gradle/test/GradleDistroTestTask.java | 18 +++-- .../gradle/test/RestIntegTestTask.java | 13 +++- .../gradle/test/RestTestBasePlugin.java | 2 +- .../org/opensearch/gradle/test/TestTask.java | 12 +++- .../gradle/test/rest/CopyRestApiTask.java | 17 +++-- .../gradle/test/rest/CopyRestTestsTask.java | 15 +++-- .../StandaloneRestIntegTestTask.java | 8 ++- .../testclusters/TestClustersAware.java | 8 ++- .../testfixtures/TestFixturesPlugin.java | 2 +- .../gradle/vagrant/VagrantShellTask.java | 20 ++++-- .../build.gradle | 6 +- 
distribution/build.gradle | 2 +- distribution/docker/build.gradle | 6 +- distribution/packages/build.gradle | 54 +++++++-------- doc-tools/build.gradle | 4 +- doc-tools/missing-doclet/build.gradle | 4 +- gradle/ide.gradle | 2 +- gradle/missing-javadoc.gradle | 13 +++- gradle/wrapper/gradle-wrapper.properties | 4 +- libs/common/build.gradle | 2 +- modules/aggs-matrix-stats/build.gradle | 4 +- modules/analysis-common/build.gradle | 4 +- modules/build.gradle | 2 +- modules/cache-common/build.gradle | 4 +- modules/geo/build.gradle | 4 +- modules/ingest-common/build.gradle | 4 +- modules/ingest-geoip/build.gradle | 4 +- modules/ingest-user-agent/build.gradle | 4 +- modules/lang-expression/build.gradle | 4 +- modules/lang-mustache/build.gradle | 4 +- modules/lang-painless/build.gradle | 4 +- modules/mapper-extras/build.gradle | 4 +- modules/opensearch-dashboards/build.gradle | 4 +- modules/parent-join/build.gradle | 4 +- modules/percolator/build.gradle | 4 +- modules/rank-eval/build.gradle | 4 +- modules/reindex/build.gradle | 4 +- modules/repository-url/build.gradle | 6 +- modules/search-pipeline-common/build.gradle | 4 +- modules/systemd/build.gradle | 4 +- modules/transport-netty4/build.gradle | 4 +- plugins/analysis-icu/build.gradle | 4 +- plugins/analysis-kuromoji/build.gradle | 4 +- plugins/analysis-nori/build.gradle | 4 +- plugins/analysis-phonenumber/build.gradle | 4 +- plugins/analysis-phonetic/build.gradle | 4 +- plugins/analysis-smartcn/build.gradle | 4 +- plugins/analysis-stempel/build.gradle | 4 +- plugins/analysis-ukrainian/build.gradle | 4 +- plugins/build.gradle | 6 +- plugins/cache-ehcache/build.gradle | 4 +- plugins/crypto-kms/build.gradle | 4 +- plugins/discovery-azure-classic/build.gradle | 4 +- plugins/discovery-ec2/build.gradle | 4 +- .../discovery-ec2/qa/amazon-ec2/build.gradle | 6 +- plugins/discovery-gce/build.gradle | 7 +- plugins/discovery-gce/qa/gce/build.gradle | 4 +- plugins/examples/custom-settings/build.gradle | 10 +-- .../build.gradle | 10 +-- .../examples/custom-suggester/build.gradle | 10 +-- .../examples/painless-allowlist/build.gradle | 10 +-- plugins/examples/rescore/build.gradle | 10 +-- plugins/examples/rest-handler/build.gradle | 12 ++-- .../script-expert-scoring/build.gradle | 10 +-- plugins/identity-shiro/build.gradle | 10 +-- plugins/ingest-attachment/build.gradle | 4 +- plugins/mapper-annotated-text/build.gradle | 4 +- plugins/mapper-murmur3/build.gradle | 4 +- plugins/mapper-size/build.gradle | 4 +- plugins/repository-azure/build.gradle | 4 +- plugins/repository-gcs/build.gradle | 4 +- plugins/repository-hdfs/build.gradle | 14 ++-- plugins/repository-s3/build.gradle | 4 +- plugins/store-smb/build.gradle | 4 +- plugins/telemetry-otel/build.gradle | 4 +- plugins/transport-grpc/build.gradle | 4 +- plugins/transport-nio/build.gradle | 4 +- plugins/transport-reactor-netty4/build.gradle | 4 +- plugins/workload-management/build.gradle | 4 +- qa/die-with-dignity/build.gradle | 4 +- qa/full-cluster-restart/build.gradle | 4 +- qa/mixed-cluster/build.gradle | 2 +- qa/multi-cluster-search/build.gradle | 2 +- qa/remote-clusters/build.gradle | 2 +- qa/repository-multi-version/build.gradle | 8 +-- qa/rolling-upgrade/build.gradle | 8 +-- qa/smoke-test-multinode/build.gradle | 2 +- qa/verify-version-constants/build.gradle | 2 +- sandbox/plugins/build.gradle | 6 +- server/build.gradle | 2 +- test/external-modules/build.gradle | 6 +- .../delayed-aggs/build.gradle | 4 +- test/fixtures/azure-fixture/build.gradle | 2 +- test/fixtures/gcs-fixture/build.gradle | 2 +- 
test/fixtures/s3-fixture/build.gradle | 2 +- 115 files changed, 521 insertions(+), 352 deletions(-) diff --git a/build.gradle b/build.gradle index f720b46bec143..679f7b9299248 100644 --- a/build.gradle +++ b/build.gradle @@ -127,8 +127,8 @@ subprojects { name = 'Snapshots' url = 'https://aws.oss.sonatype.org/content/repositories/snapshots' credentials { - username "$System.env.SONATYPE_USERNAME" - password "$System.env.SONATYPE_PASSWORD" + username = "$System.env.SONATYPE_USERNAME" + password = "$System.env.SONATYPE_PASSWORD" } } } @@ -420,7 +420,7 @@ allprojects { gradle.projectsEvaluated { allprojects { project.tasks.withType(JavaForkOptions) { - maxHeapSize project.property('options.forkOptions.memoryMaximumSize') + maxHeapSize = project.property('options.forkOptions.memoryMaximumSize') } if (project.path == ':test:framework') { @@ -736,7 +736,7 @@ tasks.named(JavaBasePlugin.CHECK_TASK_NAME) { } tasks.register('checkCompatibility', CheckCompatibilityTask) { - description('Checks the compatibility with child components') + description = 'Checks the compatibility with child components' } allprojects { project -> diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index c62f20e106e8c..f7fc0d7760993 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -106,7 +106,7 @@ dependencies { api "org.apache.commons:commons-compress:${props.getProperty('commonscompress')}" api 'org.apache.ant:ant:1.10.14' api 'com.netflix.nebula:gradle-extra-configurations-plugin:10.0.0' - api 'com.netflix.nebula:nebula-publishing-plugin:21.0.0' + api 'com.netflix.nebula:nebula-publishing-plugin:21.1.0' api 'com.netflix.nebula:gradle-info-plugin:12.1.6' api 'org.apache.rat:apache-rat:0.15' api "commons-io:commons-io:${props.getProperty('commonsio')}" diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy index 7b3a0fc01ab65..6a7a011d08dc4 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/NoticeTask.groovy @@ -30,6 +30,7 @@ package org.opensearch.gradle import org.gradle.api.DefaultTask +import org.gradle.api.Project import org.gradle.api.file.FileCollection import org.gradle.api.file.FileTree import org.gradle.api.file.SourceDirectorySet @@ -39,6 +40,8 @@ import org.gradle.api.tasks.Optional import org.gradle.api.tasks.OutputFile import org.gradle.api.tasks.TaskAction +import javax.inject.Inject + import java.nio.file.Files import java.nio.file.attribute.PosixFilePermissions @@ -58,8 +61,12 @@ class NoticeTask extends DefaultTask { /** Directories to include notices from */ private List licensesDirs = new ArrayList<>() - NoticeTask() { - description = 'Create a notice file from dependencies' + private final Project project + + @Inject + NoticeTask(Project project) { + this.project = project + this.description = 'Create a notice file from dependencies' // Default licenses directory is ${projectDir}/licenses (if it exists) File licensesDir = new File(project.projectDir, 'licenses') if (licensesDir.exists()) { @@ -161,11 +168,12 @@ class NoticeTask extends DefaultTask { @Optional FileCollection getNoticeFiles() { FileTree tree + def p = project licensesDirs.each { dir -> if (tree == null) { - tree = project.fileTree(dir) + tree = p.fileTree(dir) } else { - tree += project.fileTree(dir) + tree += p.fileTree(dir) } } diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy 
b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 13f5f8724c6f2..ad4bdb3258fcc 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -160,14 +160,14 @@ class PluginBuildPlugin implements Plugin { archiveBaseName = archiveBaseName.get() + "-client" } // always configure publishing for client jars - project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client") + project.publishing.publications.nebula(MavenPublication).artifactId = extension.name + "-client" final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask -> generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesName}-client-${project.versions.opensearch}.pom" } } else { if (project.plugins.hasPlugin(MavenPublishPlugin)) { - project.publishing.publications.nebula(MavenPublication).artifactId(extension.name) + project.publishing.publications.nebula(MavenPublication).artifactId = extension.name } } } diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy index b8d0ed2b9c43c..e3f7469b527c8 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/precommit/LicenseHeadersTask.groovy @@ -32,6 +32,7 @@ import org.apache.rat.anttasks.Report import org.apache.rat.anttasks.SubstringLicenseMatcher import org.apache.rat.license.SimpleLicenseFamily import org.opensearch.gradle.AntTask +import org.gradle.api.Project import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputFiles @@ -41,6 +42,8 @@ import org.gradle.api.tasks.PathSensitive import org.gradle.api.tasks.PathSensitivity import org.gradle.api.tasks.SkipWhenEmpty +import javax.inject.Inject + import java.nio.file.Files /** @@ -65,14 +68,18 @@ class LicenseHeadersTask extends AntTask { @Input List excludes = [] + private final Project project + /** * Additional license families that may be found. The key is the license category name (5 characters), * followed by the family name and the value list of patterns to search for. 
*/ protected Map additionalLicenses = new HashMap<>() - LicenseHeadersTask() { - description = "Checks sources for missing, incorrect, or unacceptable license headers" + @Inject + LicenseHeadersTask(Project project) { + this.project = project + this.description = "Checks sources for missing, incorrect, or unacceptable license headers" } /** diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy index 316db8aa01764..42db92fd83515 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/test/AntFixture.groovy @@ -30,12 +30,16 @@ package org.opensearch.gradle.test import org.apache.tools.ant.taskdefs.condition.Os +import org.gradle.api.Project import org.gradle.api.GradleException import org.gradle.api.tasks.Exec import org.gradle.api.tasks.Internal import org.gradle.api.tasks.TaskProvider import org.opensearch.gradle.AntTask import org.opensearch.gradle.LoggedExec + +import javax.inject.Inject + /** * A fixture for integration tests which runs in a separate process launched by Ant. */ @@ -90,9 +94,12 @@ class AntFixture extends AntTask implements Fixture { } private final TaskProvider stopTask + private final Project project - AntFixture() { - stopTask = createStopTask() + @Inject + AntFixture(Project project) { + this.project = project + this.stopTask = createStopTask() finalizedBy(stopTask) } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java index 96d7c69699c68..36aa1f99aa894 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/EmptyDirTask.java @@ -32,6 +32,7 @@ package org.opensearch.gradle; import org.gradle.api.DefaultTask; +import org.gradle.api.Project; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.Internal; import org.gradle.api.tasks.TaskAction; @@ -48,6 +49,12 @@ public class EmptyDirTask extends DefaultTask { private File dir; private int dirMode = 0755; + private final Project project; + + @Inject + public EmptyDirTask(Project project) { + this.project = project; + } /** * Creates an empty directory with the configured permissions. @@ -84,7 +91,7 @@ public void setDir(File dir) { * @param dir The path of the directory to create. Takes a String and coerces it to a file. 
*/ public void setDir(String dir) { - this.dir = getProject().file(dir); + this.dir = project.file(dir); } @Input diff --git a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java index d00e790c94fcc..072b6fa788cbd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/ExportOpenSearchBuildResourcesTask.java @@ -33,6 +33,7 @@ import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; +import org.gradle.api.Project; import org.gradle.api.file.DirectoryProperty; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; @@ -42,6 +43,8 @@ import org.gradle.api.tasks.StopExecutionException; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -67,8 +70,9 @@ public class ExportOpenSearchBuildResourcesTask extends DefaultTask { private DirectoryProperty outputDir; - public ExportOpenSearchBuildResourcesTask() { - outputDir = getProject().getObjects().directoryProperty(); + @Inject + public ExportOpenSearchBuildResourcesTask(Project project) { + outputDir = project.getObjects().directoryProperty(); } @OutputDirectory diff --git a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java index 4c62f4a6b4ee8..3557ef6ef3df7 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/LoggedExec.java @@ -70,6 +70,7 @@ public class LoggedExec extends Exec implements FileSystemOperationsAware { private static final Logger LOGGER = Logging.getLogger(LoggedExec.class); private Consumer outputLogger; private FileSystemOperations fileSystemOperations; + private final Project project; interface InjectedExecOps { @Inject @@ -77,8 +78,9 @@ interface InjectedExecOps { } @Inject - public LoggedExec(FileSystemOperations fileSystemOperations) { + public LoggedExec(FileSystemOperations fileSystemOperations, Project project) { this.fileSystemOperations = fileSystemOperations; + this.project = project; if (getLogger().isInfoEnabled() == false) { setIgnoreExitValue(true); setSpoolOutput(false); @@ -111,7 +113,7 @@ public void execute(Task task) { public void setSpoolOutput(boolean spoolOutput) { final OutputStream out; if (spoolOutput) { - File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName()); + File spoolFile = new File(project.getBuildDir() + "/buffered-output/" + this.getName()); out = new LazyFileOutputStream(spoolFile); outputLogger = logger -> { try { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java index 08f0e7488a43c..94a8592d9bc2f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerBuildTask.java @@ -34,6 +34,7 @@ import org.opensearch.gradle.LoggedExec; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; +import org.gradle.api.Project; import org.gradle.api.file.DirectoryProperty; import org.gradle.api.file.RegularFileProperty; import org.gradle.api.logging.Logger; @@ -60,18 +61,22 @@ public class DockerBuildTask extends DefaultTask { private static final Logger LOGGER = 
Logging.getLogger(DockerBuildTask.class); private final WorkerExecutor workerExecutor; - private final RegularFileProperty markerFile = getProject().getObjects().fileProperty(); - private final DirectoryProperty dockerContext = getProject().getObjects().directoryProperty(); + private final RegularFileProperty markerFile; + private final DirectoryProperty dockerContext; private String[] tags; private boolean pull = true; private boolean noCache = true; private String[] baseImages; + private final Project project; @Inject - public DockerBuildTask(WorkerExecutor workerExecutor) { + public DockerBuildTask(WorkerExecutor workerExecutor, Project project) { this.workerExecutor = workerExecutor; - this.markerFile.set(getProject().getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker")); + this.project = project; + this.markerFile = project.getObjects().fileProperty(); + this.dockerContext = project.getObjects().directoryProperty(); + this.markerFile.set(project.getLayout().getBuildDirectory().file("markers/" + this.getName() + ".marker")); } @TaskAction diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java index 7248e0bc14431..337ac5d62c3fd 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/DependencyLicensesTask.java @@ -36,6 +36,7 @@ import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.InvalidUserDataException; +import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.logging.Logger; import org.gradle.api.logging.Logging; @@ -48,6 +49,8 @@ import org.gradle.api.tasks.OutputDirectory; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -127,7 +130,7 @@ public class DependencyLicensesTask extends DefaultTask { /** * The directory to find the license and sha files in. */ - private File licensesDir = new File(getProject().getProjectDir(), "licenses"); + private File licensesDir; /** * A map of patterns to prefix, used to find the LICENSE and NOTICE file. @@ -139,6 +142,14 @@ public class DependencyLicensesTask extends DefaultTask { */ private Set ignoreShas = new HashSet<>(); + private final Project project; + + @Inject + public DependencyLicensesTask(Project project) { + this.project = project; + this.licensesDir = new File(project.getProjectDir(), "licenses"); + } + /** * Add a mapping from a regex pattern for the jar name, to a prefix to find * the LICENSE and NOTICE file for that jar. @@ -161,7 +172,7 @@ public void mapping(Map props) { @InputFiles public Property getDependencies() { if (dependenciesProvider == null) { - dependenciesProvider = getProject().getObjects().property(FileCollection.class); + dependenciesProvider = project.getObjects().property(FileCollection.class); } return dependenciesProvider; } @@ -250,7 +261,7 @@ public void checkDependencies() throws IOException, NoSuchAlgorithmException { // by this output but when successful we can safely mark the task as up-to-date. 
@OutputDirectory public File getOutputMarker() { - return new File(getProject().getBuildDir(), "dependencyLicense"); + return new File(project.getBuildDir(), "dependencyLicense"); } private void failIfAnyMissing(String item, Boolean exists, String type) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java index 2c17666d8ee0c..0e5276bfdf033 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/FilePermissionsTask.java @@ -35,6 +35,7 @@ import org.opensearch.gradle.util.GradleUtils; import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; +import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; import org.gradle.api.tasks.IgnoreEmptyDirectories; @@ -48,6 +49,8 @@ import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.api.tasks.util.PatternSet; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.nio.file.Files; @@ -71,10 +74,14 @@ public class FilePermissionsTask extends DefaultTask { // exclude sh files that might have the executable bit set .exclude("**/*.sh"); - private File outputMarker = new File(getProject().getBuildDir(), "markers/filePermissions"); + private final File outputMarker; + private final Project project; - public FilePermissionsTask() { + @Inject + public FilePermissionsTask(Project project) { setDescription("Checks java source files for correct file permissions"); + this.project = project; + this.outputMarker = new File(project.getBuildDir(), "markers/filePermissions"); } private static boolean isExecutableFile(File file) { @@ -98,11 +105,11 @@ private static boolean isExecutableFile(File file) { @IgnoreEmptyDirectories @PathSensitive(PathSensitivity.RELATIVE) public FileCollection getFiles() { - return GradleUtils.getJavaSourceSets(getProject()) + return GradleUtils.getJavaSourceSets(project) .stream() .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) .reduce(FileTree::plus) - .orElse(getProject().files().getAsFileTree()); + .orElse(project.files().getAsFileTree()); } @TaskAction diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java index 6ef1e77f5138f..1790b32fb2f36 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java @@ -34,6 +34,7 @@ import org.gradle.api.DefaultTask; import org.gradle.api.GradleException; import org.gradle.api.InvalidUserDataException; +import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; import org.gradle.api.plugins.JavaPluginExtension; @@ -48,6 +49,8 @@ import org.gradle.api.tasks.util.PatternFilterable; import org.gradle.api.tasks.util.PatternSet; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; @@ -89,8 +92,10 @@ public class ForbiddenPatternsTask extends DefaultTask { * The rules: a map from the rule name, to a rule regex pattern. 
*/ private final Map patterns = new HashMap<>(); + private final Project project; - public ForbiddenPatternsTask() { + @Inject + public ForbiddenPatternsTask(Project project) { setDescription("Checks source files for invalid patterns like nocommits or tabs"); getInputs().property("excludes", filesFilter.getExcludes()); getInputs().property("rules", patterns); @@ -99,6 +104,8 @@ public ForbiddenPatternsTask() { patterns.put("nocommit", "nocommit|NOCOMMIT"); patterns.put("nocommit should be all lowercase or all uppercase", "((?i)nocommit)(? sourceSet.getAllSource().matching(filesFilter)) .reduce(FileTree::plus) - .orElse(getProject().files().getAsFileTree()); + .orElse(project.files().getAsFileTree()); } @TaskAction @@ -131,7 +138,7 @@ public void checkInvalidPatterns() throws IOException { .boxed() .collect(Collectors.toList()); - String path = getProject().getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString(); + String path = project.getRootProject().getProjectDir().toURI().relativize(f.toURI()).toString(); failures.addAll( invalidLines.stream() .map(l -> new AbstractMap.SimpleEntry<>(l + 1, lines.get(l))) @@ -155,7 +162,7 @@ public void checkInvalidPatterns() throws IOException { @OutputFile public File getOutputMarker() { - return new File(getProject().getBuildDir(), "markers/" + getName()); + return new File(project.getBuildDir(), "markers/" + getName()); } @Input diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java index 7726133562e9f..ebe0b25a3a685 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/JarHellTask.java @@ -33,11 +33,14 @@ package org.opensearch.gradle.precommit; import org.opensearch.gradle.LoggedExec; +import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.CompileClasspath; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.File; /** @@ -47,14 +50,18 @@ public class JarHellTask extends PrecommitTask { private FileCollection classpath; + private final Project project; - public JarHellTask() { + @Inject + public JarHellTask(Project project) { + super(project); setDescription("Runs CheckJarHell on the configured classpath"); + this.project = project; } @TaskAction public void runJarHellCheck() { - LoggedExec.javaexec(getProject(), spec -> { + LoggedExec.javaexec(project, spec -> { spec.environment("CLASSPATH", getClasspath().getAsPath()); spec.getMainClass().set("org.opensearch.bootstrap.JarHell"); }); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java index db215fb65ef95..70acdcc26c212 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java @@ -33,6 +33,7 @@ package org.opensearch.gradle.precommit; import org.opensearch.gradle.LoggedExec; +import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.CacheableTask; @@ -45,6 +46,8 @@ import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.File; /** @@ -54,14 +57,18 @@ public class LoggerUsageTask 
extends PrecommitTask { private FileCollection classpath; + private final Project project; - public LoggerUsageTask() { + @Inject + public LoggerUsageTask(Project project) { + super(project); setDescription("Runs LoggerUsageCheck on output directories of all source sets"); + this.project = project; } @TaskAction public void runLoggerUsageTask() { - LoggedExec.javaexec(getProject(), spec -> { + LoggedExec.javaexec(project, spec -> { spec.getMainClass().set("org.opensearch.test.loggerusage.OpenSearchLoggerUsageChecker"); spec.classpath(getClasspath()); getClassDirectories().forEach(spec::args); @@ -82,7 +89,7 @@ public void setClasspath(FileCollection classpath) { @SkipWhenEmpty @IgnoreEmptyDirectories public FileCollection getClassDirectories() { - return getProject().getExtensions() + return project.getExtensions() .getByType(JavaPluginExtension.class) .getSourceSets() .stream() @@ -93,7 +100,7 @@ public FileCollection getClassDirectories() { ) .map(sourceSet -> sourceSet.getOutput().getClassesDirs()) .reduce(FileCollection::plus) - .orElse(getProject().files()) + .orElse(project.files()) .filter(File::exists); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java index b76e0d6dd93cf..f7dea88cb2e30 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PomValidationTask.java @@ -35,10 +35,13 @@ import org.apache.maven.model.Model; import org.apache.maven.model.io.xpp3.MavenXpp3Reader; import org.gradle.api.GradleException; +import org.gradle.api.Project; import org.gradle.api.file.RegularFileProperty; import org.gradle.api.tasks.InputFile; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.FileReader; import java.util.Collection; import java.util.function.Consumer; @@ -46,10 +49,16 @@ public class PomValidationTask extends PrecommitTask { - private final RegularFileProperty pomFile = getProject().getObjects().fileProperty(); + private final RegularFileProperty pomFile; private boolean foundError; + @Inject + public PomValidationTask(Project project) { + super(project); + this.pomFile = project.getObjects().fileProperty(); + } + @InputFile public RegularFileProperty getPomFile() { return pomFile; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java index 52646206e4792..670614aa48087 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/PrecommitTask.java @@ -32,19 +32,28 @@ package org.opensearch.gradle.precommit; import org.gradle.api.DefaultTask; +import org.gradle.api.Project; import org.gradle.api.tasks.OutputFile; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.StandardOpenOption; public class PrecommitTask extends DefaultTask { + private final Project project; + + @Inject + public PrecommitTask(Project project) { + this.project = project; + } @OutputFile public File getSuccessMarker() { - return new File(getProject().getBuildDir(), "markers/" + this.getName()); + return new File(project.getBuildDir(), "markers/" + this.getName()); } @TaskAction diff --git 
a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java index d66b1f9d25cdd..9c1285914a03e 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/TestingConventionsTasks.java @@ -38,6 +38,7 @@ import org.opensearch.gradle.util.Util; import org.gradle.api.DefaultTask; import org.gradle.api.NamedDomainObjectContainer; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; @@ -85,12 +86,15 @@ public class TestingConventionsTasks extends DefaultTask { private Map testClassNames; private final NamedDomainObjectContainer naming; + private final Project project; - public TestingConventionsTasks() { + @Inject + public TestingConventionsTasks(Project project) { setDescription("Tests various testing conventions"); // Run only after everything is compiled - GradleUtils.getJavaSourceSets(getProject()).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs())); - naming = getProject().container(TestingConventionRule.class); + GradleUtils.getJavaSourceSets(project).all(sourceSet -> dependsOn(sourceSet.getOutput().getClassesDirs())); + this.naming = project.container(TestingConventionRule.class); + this.project = project; } @Inject @@ -100,38 +104,34 @@ protected Factory getPatternSetFactory() { @Input public Map> getClassFilesPerEnabledTask() { - return getProject().getTasks() - .withType(Test.class) - .stream() - .filter(Task::getEnabled) - .collect(Collectors.toMap(Task::getPath, task -> { - // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath - final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(getProject()).orElse(null); - if (jvmTestSuite != null) { - final PatternFilterable patternSet = getPatternSetFactory().create() - .include(task.getIncludes()) - .exclude(task.getExcludes()); - - final Set files = jvmTestSuite.getSources() - .getOutput() - .getClassesDirs() - .getAsFileTree() - .matching(patternSet) - .getFiles(); - - if (!files.isEmpty()) { - return files; - } + return project.getTasks().withType(Test.class).stream().filter(Task::getEnabled).collect(Collectors.toMap(Task::getPath, task -> { + // See please https://docs.gradle.org/8.1/userguide/upgrading_version_8.html#test_task_default_classpath + final JvmTestSuite jvmTestSuite = JvmTestSuiteHelper.getDefaultTestSuite(project).orElse(null); + if (jvmTestSuite != null) { + final PatternFilterable patternSet = getPatternSetFactory().create() + .include(task.getIncludes()) + .exclude(task.getExcludes()); + + final Set files = jvmTestSuite.getSources() + .getOutput() + .getClassesDirs() + .getAsFileTree() + .matching(patternSet) + .getFiles(); + + if (!files.isEmpty()) { + return files; } + } - return task.getCandidateClassFiles().getFiles(); - })); + return task.getCandidateClassFiles().getFiles(); + })); } @Input public Map getTestClassNames() { if (testClassNames == null) { - testClassNames = Util.getJavaTestSourceSet(getProject()) + testClassNames = Util.getJavaTestSourceSet(project) .get() .getOutput() .getClassesDirs() @@ -151,7 +151,7 @@ public NamedDomainObjectContainer getNaming() { @OutputFile public File getSuccessMarker() { - return new File(getProject().getBuildDir(), "markers/" + getName()); + return new File(project.getBuildDir(), "markers/" + 
getName()); } public void naming(Closure action) { @@ -160,7 +160,7 @@ public void naming(Closure action) { @Input public Set getMainClassNamedLikeTests() { - SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(getProject()); + SourceSetContainer javaSourceSets = GradleUtils.getJavaSourceSets(project); if (javaSourceSets.findByName(SourceSet.MAIN_SOURCE_SET_NAME) == null) { // some test projects don't have a main source set return Collections.emptySet(); @@ -195,7 +195,7 @@ public void doCheck() throws IOException { .stream() .collect(Collectors.toMap(Map.Entry::getValue, entry -> loadClassWithoutInitializing(entry.getKey(), isolatedClassLoader))); - final FileTree allTestClassFiles = getProject().files( + final FileTree allTestClassFiles = project.files( classes.values() .stream() .filter(isStaticClass.negate()) @@ -207,7 +207,7 @@ public void doCheck() throws IOException { final Map> classFilesPerTask = getClassFilesPerEnabledTask(); - final Set testSourceSetFiles = Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath().getFiles(); + final Set testSourceSetFiles = Util.getJavaTestSourceSet(project).get().getRuntimeClasspath().getFiles(); final Map>> testClassesPerTask = classFilesPerTask.entrySet() .stream() .filter(entry -> testSourceSetFiles.containsAll(entry.getValue())) @@ -398,7 +398,7 @@ private boolean isAnnotated(Method method, Class annotation) { @Classpath public FileCollection getTestsClassPath() { - return Util.getJavaTestSourceSet(getProject()).get().getRuntimeClasspath(); + return Util.getJavaTestSourceSet(project).get().getRuntimeClasspath(); } private Map walkPathAndLoadClasses(File testRoot) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java index 6842f0e541abe..2ed801b7fb9c6 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ThirdPartyAuditTask.java @@ -40,6 +40,7 @@ import org.opensearch.gradle.util.GradleUtils; import org.gradle.api.DefaultTask; import org.gradle.api.JavaVersion; +import org.gradle.api.Project; import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; import org.gradle.api.file.FileCollection; @@ -107,7 +108,15 @@ public class ThirdPartyAuditTask extends DefaultTask { private FileCollection jdkJarHellClasspath; - private final Property targetCompatibility = getProject().getObjects().property(JavaVersion.class); + private final Project project; + + private final Property targetCompatibility; + + @Inject + public ThirdPartyAuditTask(Project project) { + this.project = project; + this.targetCompatibility = project.getObjects().property(JavaVersion.class); + } public boolean jarHellEnabled = true; @@ -124,7 +133,7 @@ public Property getTargetCompatibility() { @InputFiles @PathSensitive(PathSensitivity.NAME_ONLY) public Configuration getForbiddenAPIsConfiguration() { - return getProject().getConfigurations().getByName("forbiddenApisCliJar"); + return project.getConfigurations().getByName("forbiddenApisCliJar"); } @InputFile @@ -149,12 +158,12 @@ public void setJavaHome(String javaHome) { @Internal public File getJarExpandDir() { - return new File(new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"), getName()); + return new File(new File(project.getBuildDir(), "precommit/thirdPartyAudit"), getName()); } @OutputFile public File getSuccessMarker() { - return new 
File(getProject().getBuildDir(), "markers/" + getName()); + return new File(project.getBuildDir(), "markers/" + getName()); } // We use compile classpath normalization here because class implementation changes are irrelevant for the purposes of jdk jar hell. @@ -213,10 +222,10 @@ public Set getJarsToScan() { // err on the side of scanning these to make sure we don't miss anything Spec reallyThirdParty = dep -> dep.getGroup() != null && dep.getGroup().startsWith("org.opensearch") == false; - Set jars = GradleUtils.getFiles(getProject(), getRuntimeConfiguration(), reallyThirdParty).getFiles(); + Set jars = GradleUtils.getFiles(project, getRuntimeConfiguration(), reallyThirdParty).getFiles(); Set compileOnlyConfiguration = GradleUtils.getFiles( - getProject(), - getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME), + project, + project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME), reallyThirdParty ).getFiles(); // don't scan provided dependencies that we already scanned, e.x. don't scan cores dependencies for every plugin @@ -310,14 +319,14 @@ private Set extractJars(Set jars) { Set extractedJars = new TreeSet<>(); File jarExpandDir = getJarExpandDir(); // We need to clean up to make sure old dependencies don't linger - getProject().delete(jarExpandDir); + project.delete(jarExpandDir); jars.forEach(jar -> { String jarPrefix = jar.getName().replace(".jar", ""); File jarSubDir = new File(jarExpandDir, jarPrefix); extractedJars.add(jarSubDir); - FileTree jarFiles = getProject().zipTree(jar); - getProject().copy(spec -> { + FileTree jarFiles = project.zipTree(jar); + project.copy(spec -> { spec.from(jarFiles); spec.into(jarSubDir); // exclude classes from multi release jars @@ -336,8 +345,8 @@ private Set extractJars(Set jars) { IntStream.rangeClosed( Integer.parseInt(JavaVersion.VERSION_1_9.getMajorVersion()), Integer.parseInt(targetCompatibility.get().getMajorVersion()) - ).forEach(majorVersion -> getProject().copy(spec -> { - spec.from(getProject().zipTree(jar)); + ).forEach(majorVersion -> project.copy(spec -> { + spec.from(project.zipTree(jar)); spec.into(jarSubDir); String metaInfPrefix = "META-INF/versions/" + majorVersion; spec.include(metaInfPrefix + "/**"); @@ -376,7 +385,7 @@ private String formatClassList(Set classList) { private String runForbiddenAPIsCli() throws IOException { ByteArrayOutputStream errorOut = new ByteArrayOutputStream(); - InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class); + InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class); ExecResult result = execOps.getExecOps().javaexec(spec -> { if (javaHome != null) { spec.setExecutable(javaHome + "/bin/java"); @@ -384,7 +393,7 @@ private String runForbiddenAPIsCli() throws IOException { spec.classpath( getForbiddenAPIsConfiguration(), getRuntimeConfiguration(), - getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) + project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) ); spec.jvmArgs("-Xmx1g"); spec.jvmArgs(LoggedExec.shortLivedArgs()); @@ -416,12 +425,12 @@ private String runForbiddenAPIsCli() throws IOException { */ private Set runJdkJarHellCheck(Set jars) throws IOException { ByteArrayOutputStream standardOut = new ByteArrayOutputStream(); - InjectedExecOps execOps = getProject().getObjects().newInstance(InjectedExecOps.class); 
+ InjectedExecOps execOps = project.getObjects().newInstance(InjectedExecOps.class); ExecResult execResult = execOps.getExecOps().javaexec(spec -> { spec.classpath( jdkJarHellClasspath, getRuntimeConfiguration(), - getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) + project.getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME) ); spec.getMainClass().set(JDK_JAR_HELL_MAIN_CLASS); spec.args(jars); @@ -442,9 +451,9 @@ private Set runJdkJarHellCheck(Set jars) throws IOException { } private Configuration getRuntimeConfiguration() { - Configuration runtime = getProject().getConfigurations().findByName("runtimeClasspath"); + Configuration runtime = project.getConfigurations().findByName("runtimeClasspath"); if (runtime == null) { - return getProject().getConfigurations().getByName("testCompileClasspath"); + return project.getConfigurations().getByName("testCompileClasspath"); } return runtime; } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java index aff9198e15772..4bdc75457ba75 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/ErrorReportingTestListener.java @@ -192,6 +192,10 @@ public Destination getDestination() { public String getMessage() { return message; } + + public long getLogTime() { + return System.currentTimeMillis(); + } }); } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java index fa417da1a1007..caac3ede98588 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/GradleDistroTestTask.java @@ -34,9 +34,12 @@ import org.opensearch.gradle.vagrant.VagrantMachine; import org.opensearch.gradle.vagrant.VagrantShellTask; +import org.gradle.api.Project; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.options.Option; +import javax.inject.Inject; + import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -49,6 +52,13 @@ public class GradleDistroTestTask extends VagrantShellTask { private String taskName; private String testClass; private List extraArgs = new ArrayList<>(); + private final Project project; + + @Inject + public GradleDistroTestTask(Project project) { + super(project); + this.project = project; + } public void setTaskName(String taskName) { this.taskName = taskName; @@ -84,17 +94,15 @@ protected List getLinuxScript() { } private List getScript(boolean isWindows) { - String cacheDir = getProject().getBuildDir() + "/gradle-cache"; + String cacheDir = project.getBuildDir() + "/gradle-cache"; StringBuilder line = new StringBuilder(); line.append(isWindows ? "& .\\gradlew " : "./gradlew "); line.append(taskName); line.append(" --project-cache-dir "); - line.append( - isWindows ? VagrantMachine.convertWindowsPath(getProject(), cacheDir) : VagrantMachine.convertLinuxPath(getProject(), cacheDir) - ); + line.append(isWindows ? 
VagrantMachine.convertWindowsPath(project, cacheDir) : VagrantMachine.convertLinuxPath(project, cacheDir)); line.append(" -S"); line.append(" --parallel"); - line.append(" -D'org.gradle.logging.level'=" + getProject().getGradle().getStartParameter().getLogLevel()); + line.append(" -D'org.gradle.logging.level'=" + project.getGradle().getStartParameter().getLogLevel()); if (testClass != null) { line.append(" --tests="); line.append(testClass); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java index aec31d02b9bee..474c04eabbcaf 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestIntegTestTask.java @@ -35,9 +35,12 @@ import groovy.lang.Closure; import org.opensearch.gradle.testclusters.StandaloneRestIntegTestTask; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.tasks.CacheableTask; +import javax.inject.Inject; + /** * Sub typed version of {@link StandaloneRestIntegTestTask} that is used to differentiate between plain standalone * integ test tasks based on {@link StandaloneRestIntegTestTask} and @@ -45,11 +48,19 @@ */ @CacheableTask public abstract class RestIntegTestTask extends StandaloneRestIntegTestTask implements TestSuiteConventionMappings { + private final Project project; + + @Inject + public RestIntegTestTask(Project project) { + super(project); + this.project = project; + } + @SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { final Task t = super.configure(closure); - applyConventionMapping(getProject(), getConventionMapping()); + applyConventionMapping(project, getConventionMapping()); return t; } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java index ce5210482c055..24c4a46abfe29 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/RestTestBasePlugin.java @@ -55,7 +55,7 @@ public void apply(Project project) { .getExtensions() .getByName(TestClustersPlugin.EXTENSION_NAME); OpenSearchCluster cluster = testClusters.maybeCreate(restIntegTestTask.getName()); - restIntegTestTask.useCluster(cluster); + restIntegTestTask.useCluster(project, cluster); restIntegTestTask.include("**/*IT.class"); restIntegTestTask.systemProperty("tests.rest.load_packaged", Boolean.FALSE.toString()); if (System.getProperty(TESTS_REST_CLUSTER) == null) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java index f7511a2ac7f1c..abd40d2e0665a 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/TestTask.java @@ -10,17 +10,27 @@ import groovy.lang.Closure; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.testing.Test; +import javax.inject.Inject; + @CacheableTask public abstract class TestTask extends Test implements TestSuiteConventionMappings { + private final Project project; + + @Inject + public TestTask(Project project) { + this.project = project; + } + @SuppressWarnings("rawtypes") @Override public Task configure(Closure closure) { final Task t = super.configure(closure); - 
applyConventionMapping(getProject(), getConventionMapping()); + applyConventionMapping(project, getConventionMapping()); return t; } } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java index 485561a305291..4d6be4beaccf8 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java @@ -74,16 +74,20 @@ */ public class CopyRestApiTask extends DefaultTask { private static final String REST_API_PREFIX = "rest-api-spec/api"; - final ListProperty includeCore = getProject().getObjects().listProperty(String.class); + final ListProperty includeCore; String sourceSetName; boolean skipHasRestTestCheck; Configuration coreConfig; Configuration additionalConfig; + private final Project project; private final PatternFilterable corePatternSet; - public CopyRestApiTask() { - corePatternSet = getPatternSetFactory().create(); + @Inject + public CopyRestApiTask(Project project) { + this.project = project; + this.corePatternSet = getPatternSetFactory().create(); + this.includeCore = project.getObjects().listProperty(String.class); } @Inject @@ -133,8 +137,8 @@ public FileTree getInputDir() { } ConfigurableFileCollection fileCollection = additionalConfig == null - ? getProject().files(coreFileTree) - : getProject().files(coreFileTree, additionalConfig.getAsFileTree()); + ? project.files(coreFileTree) + : project.files(coreFileTree, additionalConfig.getAsFileTree()); // if project has rest tests or the includes are explicitly configured execute the task, else NO-SOURCE due to the null input return projectHasYamlRestTests || includeCore.get().isEmpty() == false ? fileCollection.getAsFileTree() : null; @@ -210,7 +214,7 @@ private boolean projectHasYamlRestTests() { .anyMatch(p -> p.getFileName().toString().endsWith("yml")); } } catch (IOException e) { - throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", getProject()), e); + throw new IllegalStateException(String.format("Error determining if this project [%s] has rest tests.", project), e); } return false; } @@ -240,7 +244,6 @@ private File getTestOutputResourceDir() { } private Optional getSourceSet() { - Project project = getProject(); return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java index 0d5af7ca06b50..6f7c99889e3a2 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java @@ -71,16 +71,20 @@ */ public class CopyRestTestsTask extends DefaultTask { private static final String REST_TEST_PREFIX = "rest-api-spec/test"; - final ListProperty includeCore = getProject().getObjects().listProperty(String.class); + final ListProperty includeCore; String sourceSetName; Configuration coreConfig; Configuration additionalConfig; + private final Project project; private final PatternFilterable corePatternSet; - public CopyRestTestsTask() { - corePatternSet = getPatternSetFactory().create(); + @Inject + public CopyRestTestsTask(Project project) { + this.project = project; + this.corePatternSet = getPatternSetFactory().create(); + this.includeCore = project.getObjects().listProperty(String.class); } @Inject @@ -123,8 +127,8 @@ public FileTree getInputDir() { } } ConfigurableFileCollection fileCollection = additionalConfig == null - ? getProject().files(coreFileTree) - : getProject().files(coreFileTree, additionalConfig.getAsFileTree()); + ? project.files(coreFileTree) + : project.files(coreFileTree, additionalConfig.getAsFileTree()); // copy tests only if explicitly requested return includeCore.get().isEmpty() == false || additionalConfig != null ? fileCollection.getAsFileTree() : null; @@ -178,7 +182,6 @@ void copy() { } private Optional getSourceSet() { - Project project = getProject(); return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java index ddcbf77b0d5e6..5b883f8068825 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/StandaloneRestIntegTestTask.java @@ -36,6 +36,7 @@ import org.opensearch.gradle.FileSystemOperationsAware; import org.opensearch.gradle.test.Fixture; import org.opensearch.gradle.util.GradleUtils; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.provider.Provider; import org.gradle.api.services.internal.BuildServiceProvider; @@ -48,6 +49,8 @@ import org.gradle.internal.resources.ResourceLock; import org.gradle.internal.resources.SharedResource; +import javax.inject.Inject; + import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.util.ArrayList; @@ -67,7 +70,8 @@ public abstract class StandaloneRestIntegTestTask extends Test implements TestCl private Collection clusters = new HashSet<>(); private Closure beforeStart; - public StandaloneRestIntegTestTask() { + @Inject + public StandaloneRestIntegTestTask(Project project) { this.getOutputs() .doNotCacheIf( "Caching disabled for this task since it uses a cluster shared by other tasks", @@ -77,7 +81,7 @@ public StandaloneRestIntegTestTask() { * avoid any undesired behavior we simply disable the cache if we detect that this task uses a cluster shared between * multiple tasks. */ - t -> getProject().getTasks() + t -> project.getTasks() .withType(StandaloneRestIntegTestTask.class) .stream() .filter(task -> task != this) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java index e5c413df00d0d..f2eeec08fc71f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testclusters/TestClustersAware.java @@ -31,6 +31,7 @@ package org.opensearch.gradle.testclusters; +import org.gradle.api.Project; import org.gradle.api.Task; import org.gradle.api.artifacts.Configuration; import org.gradle.api.tasks.Nested; @@ -43,8 +44,13 @@ public interface TestClustersAware extends Task { @Nested Collection getClusters(); + @Deprecated(forRemoval = true) default void useCluster(OpenSearchCluster cluster) { - if (cluster.getPath().equals(getProject().getPath()) == false) { + useCluster(getProject(), cluster); + } + + default void useCluster(Project project, OpenSearchCluster cluster) { + if (cluster.getPath().equals(project.getPath()) == false) { throw new TestClustersException("Task " + getPath() + " can't use test cluster from" + " another project " + cluster); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index 79b5f837c75ce..c3b870e4ce5ad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -249,7 +249,7 @@ private void configureServiceInfoForTask( task.doFirst(new Action() { @Override public void execute(Task theTask) { - 
TestFixtureExtension extension = theTask.getProject().getExtensions().getByType(TestFixtureExtension.class); + TestFixtureExtension extension = fixtureProject.getExtensions().getByType(TestFixtureExtension.class); fixtureProject.getExtensions() .getByType(ComposeExtension.class) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java index ca1b95183505f..665f690b8b146 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/vagrant/VagrantShellTask.java @@ -33,9 +33,12 @@ package org.opensearch.gradle.vagrant; import org.gradle.api.DefaultTask; +import org.gradle.api.Project; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskAction; +import javax.inject.Inject; + import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -55,13 +58,16 @@ public abstract class VagrantShellTask extends DefaultTask { private final VagrantExtension extension; private final VagrantMachine service; private UnaryOperator progressHandler = UnaryOperator.identity(); + private final Project project; - public VagrantShellTask() { - extension = getProject().getExtensions().findByType(VagrantExtension.class); - if (extension == null) { + @Inject + public VagrantShellTask(Project project) { + this.project = project; + this.extension = project.getExtensions().findByType(VagrantExtension.class); + if (this.extension == null) { throw new IllegalStateException("opensearch.vagrant-base must be applied to create " + getClass().getName()); } - service = getProject().getExtensions().getByType(VagrantMachine.class); + this.service = project.getExtensions().getByType(VagrantMachine.class); } @Input @@ -81,14 +87,14 @@ public void setProgressHandler(UnaryOperator progressHandler) { @TaskAction public void runScript() { - String rootDir = getProject().getRootDir().toString(); + String rootDir = project.getRootDir().toString(); if (extension.isWindowsVM()) { service.execute(spec -> { spec.setCommand("winrm"); List script = new ArrayList<>(); script.add("try {"); - script.add("cd " + convertWindowsPath(getProject(), rootDir)); + script.add("cd " + convertWindowsPath(project, rootDir)); extension.getVmEnv().forEach((k, v) -> script.add("$Env:" + k + " = \"" + v + "\"")); script.addAll(getWindowsScript().stream().map(s -> " " + s).collect(Collectors.toList())); script.addAll( @@ -111,7 +117,7 @@ public void runScript() { List script = new ArrayList<>(); script.add("sudo bash -c '"); // start inline bash script script.add("pwd"); - script.add("cd " + convertLinuxPath(getProject(), rootDir)); + script.add("cd " + convertLinuxPath(project, rootDir)); extension.getVmEnv().forEach((k, v) -> script.add("export " + k + "=" + v)); script.addAll(getLinuxScript()); script.add("'"); // end inline bash script diff --git a/client/client-benchmark-noop-api-plugin/build.gradle b/client/client-benchmark-noop-api-plugin/build.gradle index 8e4f40c096851..feec78547edb6 100644 --- a/client/client-benchmark-noop-api-plugin/build.gradle +++ b/client/client-benchmark-noop-api-plugin/build.gradle @@ -33,9 +33,9 @@ group = 'org.opensearch.plugin' apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - name 'client-benchmark-noop-api' - description 'Stubbed out OpenSearch actions that can be used for client-side benchmarking' - classname 'org.opensearch.plugin.noop.NoopPlugin' + name = 'client-benchmark-noop-api' + description = 
'Stubbed out OpenSearch actions that can be used for client-side benchmarking' + classname = 'org.opensearch.plugin.noop.NoopPlugin' } // Not published so no need to assemble diff --git a/distribution/build.gradle b/distribution/build.gradle index 36efe2e0d45e8..b04b04062134f 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -150,7 +150,7 @@ void copyModule(TaskProvider copyTask, Project module) { dependsOn moduleConfig from({ zipTree(moduleConfig.singleFile) }) { - includeEmptyDirs false + includeEmptyDirs = false // these are handled separately in the log4j config tasks below exclude '*/config/log4j2.properties' diff --git a/distribution/docker/build.gradle b/distribution/docker/build.gradle index 64471139e025b..d2b99ab051327 100644 --- a/distribution/docker/build.gradle +++ b/distribution/docker/build.gradle @@ -178,7 +178,7 @@ tasks.named("preProcessFixture").configure { } doLast { // tests expect to have an empty repo - project.delete( + delete( "${buildDir}/repo" ) createAndSetWritable( @@ -273,8 +273,8 @@ subprojects { Project subProject -> } artifacts.add('default', file(tarFile)) { - type 'tar' - name artifactName + type = 'tar' + name = artifactName builtBy exportTaskName } diff --git a/distribution/packages/build.gradle b/distribution/packages/build.gradle index e1fa4de5a0caa..ada19dfa38e78 100644 --- a/distribution/packages/build.gradle +++ b/distribution/packages/build.gradle @@ -111,21 +111,21 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { OS.current().equals(OS.WINDOWS) == false } dependsOn "process'${jdk ? '' : 'NoJdk'}${type.capitalize()}Files" - packageName "opensearch" + packageName = "opensearch" if (type == 'deb') { if (architecture == 'x64') { - arch('amd64') + arch = 'amd64' } else { assert architecture == 'arm64' : architecture - arch('arm64') + arch = 'arm64' } } else { assert type == 'rpm' : type if (architecture == 'x64') { - arch('x86_64') + arch = 'x86_64' } else { assert architecture == 'arm64' : architecture - arch('aarch64') + arch = 'aarch64' } } // Follow opensearch's file naming convention @@ -224,8 +224,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { } into('/etc') permissionGroup 'opensearch' - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true include("opensearch") // empty dir, just to add directory entry include("opensearch/jvm.options.d") // empty dir, just to add directory entry } @@ -238,8 +238,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { unix 0660 } permissionGroup 'opensearch' - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true fileType CONFIG | NOREPLACE } String envFile = expansionsForDistribution(type, jdk)['path.env'] @@ -298,8 +298,8 @@ Closure commonPackageConfig(String type, boolean jdk, String architecture) { into(file.parent) { from "${packagingFiles}/${file.parent}" include file.name - includeEmptyDirs true - createDirectoryEntry true + includeEmptyDirs = true + createDirectoryEntry = true user u permissionGroup g dirPermissions { @@ -320,13 +320,13 @@ apply plugin: 'com.netflix.nebula.ospackage-base' // this is package indepdendent configuration ospackage { - maintainer 'OpenSearch Team ' - summary 'Distributed RESTful search engine built for the cloud' - packageDescription ''' + maintainer ='OpenSearch Team ' + summary = 'Distributed RESTful search engine built for the cloud' + packageDescription = 
''' Reference documentation can be found at https://github.com/opensearch-project/OpenSearch '''.stripIndent().trim() - url 'https://github.com/opensearch-project/OpenSearch' + url = 'https://github.com/opensearch-project/OpenSearch' // signing setup if (project.hasProperty('signing.password') && BuildParams.isSnapshotBuild() == false) { @@ -340,10 +340,10 @@ ospackage { // version found on oldest supported distro, centos-6 requires('coreutils', '8.4', GREATER | EQUAL) - fileMode 0644 - dirMode 0755 - user 'root' - permissionGroup 'root' + fileMode = 0644 + dirMode = 0755 + user = 'root' + permissionGroup = 'root' into '/usr/share/opensearch' } @@ -357,7 +357,7 @@ Closure commonDebConfig(boolean jdk, String architecture) { customFields['License'] = 'ASL-2.0' archiveVersion = project.version.replace('-', '~') - packageGroup 'web' + packageGroup = 'web' // versions found on oldest supported distro, centos-6 requires('bash', '4.1', GREATER | EQUAL) @@ -394,24 +394,24 @@ Closure commonRpmConfig(boolean jdk, String architecture) { return { configure(commonPackageConfig('rpm', jdk, architecture)) - license 'ASL-2.0' + license = 'ASL-2.0' - packageGroup 'Application/Internet' + packageGroup = 'Application/Internet' requires '/bin/bash' obsoletes packageName, '7.0.0', Flags.LESS prefix '/usr' - packager 'OpenSearch' + packager = 'OpenSearch' archiveVersion = project.version.replace('-', '_') release = '1' - os 'LINUX' - distribution 'OpenSearch' - vendor 'OpenSearch' + os = 'LINUX' + distribution = 'OpenSearch' + vendor = 'OpenSearch' // TODO ospackage doesn't support icon but we used to have one // without this the rpm will have parent dirs of any files we copy in, eg /etc/opensearch - addParentDirs false + addParentDirs = false } } diff --git a/doc-tools/build.gradle b/doc-tools/build.gradle index e6ace21420dda..9639c7d7048d6 100644 --- a/doc-tools/build.gradle +++ b/doc-tools/build.gradle @@ -3,8 +3,8 @@ plugins { } base { - group 'org.opensearch' - version '1.0.0-SNAPSHOT' + group = 'org.opensearch' + version = '1.0.0-SNAPSHOT' } repositories { diff --git a/doc-tools/missing-doclet/build.gradle b/doc-tools/missing-doclet/build.gradle index 114ccc948951a..c3c951fbcaf47 100644 --- a/doc-tools/missing-doclet/build.gradle +++ b/doc-tools/missing-doclet/build.gradle @@ -2,8 +2,8 @@ plugins { id 'java-library' } -group 'org.opensearch' -version '1.0.0-SNAPSHOT' +group = 'org.opensearch' +version = '1.0.0-SNAPSHOT' tasks.withType(JavaCompile) { options.compilerArgs += ["--release", targetCompatibility.toString()] diff --git a/gradle/ide.gradle b/gradle/ide.gradle index e266d9add172d..c16205468d63d 100644 --- a/gradle/ide.gradle +++ b/gradle/ide.gradle @@ -16,7 +16,7 @@ import org.jetbrains.gradle.ext.JUnit buildscript { repositories { maven { - url "https://plugins.gradle.org/m2/" + url = "https://plugins.gradle.org/m2/" } } dependencies { diff --git a/gradle/missing-javadoc.gradle b/gradle/missing-javadoc.gradle index 5a98a60e806ea..179c905c880b4 100644 --- a/gradle/missing-javadoc.gradle +++ b/gradle/missing-javadoc.gradle @@ -64,8 +64,8 @@ allprojects { tasks.register('missingJavadoc', MissingJavadocTask) { - description "This task validates and generates Javadoc API documentation for the main source code." - group "documentation" + description = "This task validates and generates Javadoc API documentation for the main source code." 
+ group = "documentation" taskResources = resources dependsOn sourceSets.main.compileClasspath @@ -227,11 +227,18 @@ class MissingJavadocTask extends DefaultTask { @PathSensitive(PathSensitivity.RELATIVE) def taskResources + Project project + // See please https://docs.gradle.org/8.11/userguide/service_injection.html#execoperations interface InjectedExecOps { @Inject ExecOperations getExecOps() } + @Inject + MissingJavadocTask(Project project) { + this.project = project + } + /** Utility method to recursively collect all tasks with same name like this one that we depend on */ private Set findRenderTasksInDependencies() { Set found = [] @@ -350,7 +357,7 @@ class MissingJavadocTask extends DefaultTask { // force locale to be "en_US" (fix for: https://bugs.openjdk.java.net/browse/JDK-8222793) args += [ "-J-Duser.language=en", "-J-Duser.country=US" ] - ignoreExitValue true + ignoreExitValue = true } } diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index ec480eaeb61ef..8b3d2296213c2 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.11.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=89d4e70e4e84e2d2dfbb63e4daa53e21b25017cc70c37e4eea31ee51fb15098a +distributionSha256Sum=7ebdac923867a3cec0098302416d1e3c6c0c729fc4e2e05c10637a8af33a76c5 diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 60bf488833393..2bf2dbb803d9f 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -92,7 +92,7 @@ if (BuildParams.runtimeJavaVersion >= JavaVersion.VERSION_20) { } tasks.register('roundableSimdTest', Test) { - group 'verification' + group = 'verification' include '**/RoundableTests.class' systemProperty 'opensearch.experimental.feature.simd.rounding.enabled', 'forced' } diff --git a/modules/aggs-matrix-stats/build.gradle b/modules/aggs-matrix-stats/build.gradle index 705fa17456a79..fc3e009e0660e 100644 --- a/modules/aggs-matrix-stats/build.gradle +++ b/modules/aggs-matrix-stats/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' - classname 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin' + description = 'Adds aggregations whose input are a list of numeric fields and output includes a matrix.' + classname = 'org.opensearch.search.aggregations.matrix.MatrixAggregationModulePlugin' hasClientJar = true } diff --git a/modules/analysis-common/build.gradle b/modules/analysis-common/build.gradle index 58ecf79cda0d7..b0e1aaa2de814 100644 --- a/modules/analysis-common/build.gradle +++ b/modules/analysis-common/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Adds "built in" analyzers to OpenSearch.' - classname 'org.opensearch.analysis.common.CommonAnalysisModulePlugin' + description = 'Adds "built in" analyzers to OpenSearch.' 
+ classname = 'org.opensearch.analysis.common.CommonAnalysisModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/build.gradle b/modules/build.gradle index 126bf0c8870ac..0c69a43af0509 100644 --- a/modules/build.gradle +++ b/modules/build.gradle @@ -35,7 +35,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) { opensearchplugin { // for local OpenSearch plugins, the name of the plugin is the same as the directory - name project.name + name = project.name } if (project.file('src/main/packaging').exists()) { diff --git a/modules/cache-common/build.gradle b/modules/cache-common/build.gradle index 98cdec83b9ad1..996c47b26b4d9 100644 --- a/modules/cache-common/build.gradle +++ b/modules/cache-common/build.gradle @@ -9,8 +9,8 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for caches which are optional and do not require additional security permission' - classname 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' + description = 'Module for caches which are optional and do not require additional security permission' + classname = 'org.opensearch.cache.common.tier.TieredSpilloverCachePlugin' } test { diff --git a/modules/geo/build.gradle b/modules/geo/build.gradle index 7ab6f80b65ca2..dc135ce7a4e35 100644 --- a/modules/geo/build.gradle +++ b/modules/geo/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint' - classname 'org.opensearch.geo.GeoModulePlugin' + description = 'Plugin for geospatial features in OpenSearch. Registering the geo_shape and aggregations on GeoShape and GeoPoint' + classname = 'org.opensearch.geo.GeoModulePlugin' } restResources { diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 7b567eb9110c5..721aef35f5ff3 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.opensearch.ingest.common.IngestCommonModulePlugin' + description = 'Module for ingest processors that do not require additional security permissions or have large dependencies and resources' + classname = 'org.opensearch.ingest.common.IngestCommonModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index f74de1dc290dd..3f74690e3ef4f 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database' - classname 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin' + description = 'Ingest processor that uses looksup geo data based on ip adresses using the Maxmind geo database' + classname = 'org.opensearch.ingest.geoip.IngestGeoIpModulePlugin' } dependencies { diff --git a/modules/ingest-user-agent/build.gradle b/modules/ingest-user-agent/build.gradle index 187e72d192a3d..85206861ab5f2 100644 --- 
a/modules/ingest-user-agent/build.gradle +++ b/modules/ingest-user-agent/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Ingest processor that extracts information from a user agent' - classname 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin' + description = 'Ingest processor that extracts information from a user agent' + classname = 'org.opensearch.ingest.useragent.IngestUserAgentModulePlugin' } restResources { diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle index 94811cb608553..6efa3f3e667b5 100644 --- a/modules/lang-expression/build.gradle +++ b/modules/lang-expression/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Lucene expressions integration for OpenSearch' - classname 'org.opensearch.script.expression.ExpressionModulePlugin' + description = 'Lucene expressions integration for OpenSearch' + classname = 'org.opensearch.script.expression.ExpressionModulePlugin' } dependencies { diff --git a/modules/lang-mustache/build.gradle b/modules/lang-mustache/build.gradle index a836124f94b41..4aaaa9fea1c59 100644 --- a/modules/lang-mustache/build.gradle +++ b/modules/lang-mustache/build.gradle @@ -32,8 +32,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Mustache scripting integration for OpenSearch' - classname 'org.opensearch.script.mustache.MustacheModulePlugin' + description = 'Mustache scripting integration for OpenSearch' + classname = 'org.opensearch.script.mustache.MustacheModulePlugin' hasClientJar = true // For the template apis and query } diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index ffb1fe6117c06..3895c512c61b4 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -36,8 +36,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'An easy, safe and fast scripting language for OpenSearch' - classname 'org.opensearch.painless.PainlessModulePlugin' + description = 'An easy, safe and fast scripting language for OpenSearch' + classname = 'org.opensearch.painless.PainlessModulePlugin' } ext { diff --git a/modules/mapper-extras/build.gradle b/modules/mapper-extras/build.gradle index b16176ca5aa72..1867abafc79c8 100644 --- a/modules/mapper-extras/build.gradle +++ b/modules/mapper-extras/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.java-rest-test' opensearchplugin { - description 'Adds advanced field mappers' - classname 'org.opensearch.index.mapper.MapperExtrasModulePlugin' + description = 'Adds advanced field mappers' + classname = 'org.opensearch.index.mapper.MapperExtrasModulePlugin' hasClientJar = true } diff --git a/modules/opensearch-dashboards/build.gradle b/modules/opensearch-dashboards/build.gradle index 07453e1f70f1c..8c590a348a9c4 100644 --- a/modules/opensearch-dashboards/build.gradle +++ b/modules/opensearch-dashboards/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.java-rest-test' opensearchplugin { - description 'Plugin exposing APIs for OpenSearch Dashboards system indices' - classname 'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin' + description = 'Plugin exposing APIs for OpenSearch Dashboards system indices' + classname = 
'org.opensearch.dashboards.OpenSearchDashboardsModulePlugin' } dependencies { diff --git a/modules/parent-join/build.gradle b/modules/parent-join/build.gradle index d509e65106e7b..08b624ea4f3fa 100644 --- a/modules/parent-join/build.gradle +++ b/modules/parent-join/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'This module adds the support parent-child queries and aggregations' - classname 'org.opensearch.join.ParentJoinModulePlugin' + description = 'This module adds the support parent-child queries and aggregations' + classname = 'org.opensearch.join.ParentJoinModulePlugin' hasClientJar = true } diff --git a/modules/percolator/build.gradle b/modules/percolator/build.gradle index 2312f7bda80b2..9669d1057fb41 100644 --- a/modules/percolator/build.gradle +++ b/modules/percolator/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Percolator module adds capability to index queries and query these queries by specifying documents' - classname 'org.opensearch.percolator.PercolatorModulePlugin' + description = 'Percolator module adds capability to index queries and query these queries by specifying documents' + classname = 'org.opensearch.percolator.PercolatorModulePlugin' hasClientJar = true } diff --git a/modules/rank-eval/build.gradle b/modules/rank-eval/build.gradle index 4232d583dc984..f6946c631221d 100644 --- a/modules/rank-eval/build.gradle +++ b/modules/rank-eval/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Rank Eval module adds APIs to evaluate ranking quality.' - classname 'org.opensearch.index.rankeval.RankEvalModulePlugin' + description = 'The Rank Eval module adds APIs to evaluate ranking quality.' + classname = 'org.opensearch.index.rankeval.RankEvalModulePlugin' hasClientJar = true } diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index cad7d67f3ef84..a44e1004d93ad 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -40,8 +40,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' - classname 'org.opensearch.index.reindex.ReindexModulePlugin' + description = 'The Reindex module adds APIs to reindex from one index to another or update documents in place.' 
+ classname = 'org.opensearch.index.reindex.ReindexModulePlugin' hasClientJar = true } diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 7a697623eb8d9..49c3a12f23fe0 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -37,8 +37,8 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for URL repository' - classname 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin' + description = 'Module for URL repository' + classname = 'org.opensearch.plugin.repository.url.URLRepositoryModulePlugin' } restResources { @@ -56,7 +56,7 @@ task urlFixture(type: AntFixture) { doFirst { repositoryDir.mkdirs() } - env 'CLASSPATH', "${-> project.sourceSets.test.runtimeClasspath.asPath}" + env 'CLASSPATH', "${-> sourceSets.test.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.repositories.url.URLFixture', baseDir, "${repositoryDir.absolutePath}" } diff --git a/modules/search-pipeline-common/build.gradle b/modules/search-pipeline-common/build.gradle index 657392d884e97..4b6d579dc22e8 100644 --- a/modules/search-pipeline-common/build.gradle +++ b/modules/search-pipeline-common/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources' - classname 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin' + description = 'Module for search pipeline processors that do not require additional security permissions or have large dependencies and resources' + classname = 'org.opensearch.search.pipeline.common.SearchPipelineCommonModulePlugin' extendedPlugins = ['lang-painless'] } diff --git a/modules/systemd/build.gradle b/modules/systemd/build.gradle index 726092ffe4273..25a32616777b7 100644 --- a/modules/systemd/build.gradle +++ b/modules/systemd/build.gradle @@ -29,6 +29,6 @@ */ opensearchplugin { - description 'Integrates OpenSearch with systemd' - classname 'org.opensearch.systemd.SystemdModulePlugin' + description = 'Integrates OpenSearch with systemd' + classname = 'org.opensearch.systemd.SystemdModulePlugin' } diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index cdaf8350055f0..4e68a4ce17f73 100644 --- a/modules/transport-netty4/build.gradle +++ b/modules/transport-netty4/build.gradle @@ -49,8 +49,8 @@ apply plugin: 'opensearch.publish' * maybe figure out a way to run all tests from core with netty4/network? */ opensearchplugin { - description 'Netty 4 based transport implementation' - classname 'org.opensearch.transport.Netty4ModulePlugin' + description = 'Netty 4 based transport implementation' + classname = 'org.opensearch.transport.Netty4ModulePlugin' hasClientJar = true } diff --git a/plugins/analysis-icu/build.gradle b/plugins/analysis-icu/build.gradle index e5c084559f0a6..25e1587136d78 100644 --- a/plugins/analysis-icu/build.gradle +++ b/plugins/analysis-icu/build.gradle @@ -32,8 +32,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.' 
- classname 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin' + description = 'The ICU Analysis plugin integrates the Lucene ICU module into OpenSearch, adding ICU-related analysis components.' + classname = 'org.opensearch.plugin.analysis.icu.AnalysisICUPlugin' hasClientJar = true } diff --git a/plugins/analysis-kuromoji/build.gradle b/plugins/analysis-kuromoji/build.gradle index 426b85f44bf55..5babcb2757f5e 100644 --- a/plugins/analysis-kuromoji/build.gradle +++ b/plugins/analysis-kuromoji/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' + description = 'The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into opensearch.' + classname = 'org.opensearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin' } dependencies { diff --git a/plugins/analysis-nori/build.gradle b/plugins/analysis-nori/build.gradle index 3def7f9c6c60f..41a73fb3895ef 100644 --- a/plugins/analysis-nori/build.gradle +++ b/plugins/analysis-nori/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin' + description = 'The Korean (nori) Analysis plugin integrates Lucene nori analysis module into opensearch.' + classname = 'org.opensearch.plugin.analysis.nori.AnalysisNoriPlugin' } dependencies { diff --git a/plugins/analysis-phonenumber/build.gradle b/plugins/analysis-phonenumber/build.gradle index c9913b36f8508..1e19167582e19 100644 --- a/plugins/analysis-phonenumber/build.gradle +++ b/plugins/analysis-phonenumber/build.gradle @@ -12,8 +12,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Adds an analyzer for phone numbers to OpenSearch.' - classname 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin' + description = 'Adds an analyzer for phone numbers to OpenSearch.' + classname = 'org.opensearch.analysis.phone.PhoneNumberAnalysisPlugin' } dependencies { diff --git a/plugins/analysis-phonetic/build.gradle b/plugins/analysis-phonetic/build.gradle index ffa0466d43170..c0272b78c3db8 100644 --- a/plugins/analysis-phonetic/build.gradle +++ b/plugins/analysis-phonetic/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.' - classname 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin' + description = 'The Phonetic Analysis plugin integrates phonetic token filter analysis with opensearch.' + classname = 'org.opensearch.plugin.analysis.AnalysisPhoneticPlugin' } dependencies { diff --git a/plugins/analysis-smartcn/build.gradle b/plugins/analysis-smartcn/build.gradle index d74d314ab0673..448a3a5e0a637 100644 --- a/plugins/analysis-smartcn/build.gradle +++ b/plugins/analysis-smartcn/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' + description = 'Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into opensearch.' 
+ classname = 'org.opensearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin' } dependencies { diff --git a/plugins/analysis-stempel/build.gradle b/plugins/analysis-stempel/build.gradle index d713f80172c58..90523ae2d9d95 100644 --- a/plugins/analysis-stempel/build.gradle +++ b/plugins/analysis-stempel/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.' - classname 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin' + description = 'The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into opensearch.' + classname = 'org.opensearch.plugin.analysis.stempel.AnalysisStempelPlugin' } dependencies { diff --git a/plugins/analysis-ukrainian/build.gradle b/plugins/analysis-ukrainian/build.gradle index 6122c055c788e..7e760423438c1 100644 --- a/plugins/analysis-ukrainian/build.gradle +++ b/plugins/analysis-ukrainian/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.' - classname 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' + description = 'The Ukrainian Analysis plugin integrates the Lucene UkrainianMorfologikAnalyzer into opensearch.' + classname = 'org.opensearch.plugin.analysis.ukrainian.AnalysisUkrainianPlugin' } dependencies { diff --git a/plugins/build.gradle b/plugins/build.gradle index 4e6de2c120d35..6c7fb749d08ac 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -39,9 +39,9 @@ configure(subprojects.findAll { it.parent.path == project.path }) { opensearchplugin { // for local ES plugins, the name of the plugin is the same as the directory - name project.name + name = project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } } diff --git a/plugins/cache-ehcache/build.gradle b/plugins/cache-ehcache/build.gradle index 5747624e2fb69..6390b045db8ea 100644 --- a/plugins/cache-ehcache/build.gradle +++ b/plugins/cache-ehcache/build.gradle @@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Ehcache based cache implementation.' - classname 'org.opensearch.cache.EhcacheCachePlugin' + description = 'Ehcache based cache implementation.' 
+ classname = 'org.opensearch.cache.EhcacheCachePlugin' } versions << [ diff --git a/plugins/crypto-kms/build.gradle b/plugins/crypto-kms/build.gradle index c4a8609b6df48..fa63a4a7153d3 100644 --- a/plugins/crypto-kms/build.gradle +++ b/plugins/crypto-kms/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'opensearch.publish' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'AWS KMS plugin to provide crypto keys' - classname 'org.opensearch.crypto.kms.CryptoKmsPlugin' + description = 'AWS KMS plugin to provide crypto keys' + classname = 'org.opensearch.crypto.kms.CryptoKmsPlugin' } ext { diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 7f34cec94499c..2627b3061bdf2 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -35,8 +35,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' - classname 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' + description = 'The Azure Classic Discovery plugin allows to use Azure Classic API for the unicast discovery mechanism' + classname = 'org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin' } versions << [ diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 9c9f64f09b915..8d615e0bf8d9d 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -34,8 +34,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.' - classname 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin' + description = 'The EC2 discovery plugin allows to use AWS API for the unicast discovery mechanism.' 
+ classname = 'org.opensearch.discovery.ec2.Ec2DiscoveryPlugin' } dependencies { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index a844576d67ece..41c423c57ba36 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -76,8 +76,8 @@ yamlRestTest.enabled = false */ ['KeyStore', 'EnvVariables', 'SystemProperties', 'ContainerCredentials', 'InstanceProfile'].forEach { action -> AntFixture fixture = tasks.create(name: "ec2Fixture${action}", type: AntFixture) { - dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" + dependsOn sourceSets.yamlRestTest.runtimeClasspath + env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.discovery.ec2.AmazonEC2Fixture', baseDir, "${buildDir}/testclusters/yamlRestTest${action}-1/config/unicast_hosts.txt" } @@ -85,7 +85,7 @@ yamlRestTest.enabled = false tasks.create(name: "yamlRestTest${action}", type: RestIntegTestTask) { dependsOn fixture } - SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSetContainer sourceSets = getExtensions().getByType(SourceSetContainer.class); SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) "yamlRestTest${action}" { setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 3214db2074198..a9338bfc43a2c 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -13,8 +13,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' - classname 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' + description = 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.' + classname = 'org.opensearch.plugin.discovery.gce.GceDiscoveryPlugin' } dependencies { @@ -52,9 +52,10 @@ check { dependsOn 'qa:gce:check' } +def name = project.name test { // this is needed for insecure plugins, remove if possible! 
- systemProperty 'tests.artifact', project.name + systemProperty 'tests.artifact', name } thirdPartyAudit { diff --git a/plugins/discovery-gce/qa/gce/build.gradle b/plugins/discovery-gce/qa/gce/build.gradle index 841cd396a8bcf..562ec4e1db482 100644 --- a/plugins/discovery-gce/qa/gce/build.gradle +++ b/plugins/discovery-gce/qa/gce/build.gradle @@ -51,8 +51,8 @@ restResources { /** A task to start the GCEFixture which emulates a GCE service **/ task gceFixture(type: AntFixture) { - dependsOn project.sourceSets.yamlRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.yamlRestTest.runtimeClasspath.asPath}" + dependsOn sourceSets.yamlRestTest.runtimeClasspath + env 'CLASSPATH', "${-> sourceSets.yamlRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.cloud.gce.GCEFixture', baseDir, "${buildDir}/testclusters/yamlRestTest-1/config/unicast_hosts.txt" } diff --git a/plugins/examples/custom-settings/build.gradle b/plugins/examples/custom-settings/build.gradle index 5b35d887b3db1..c83e710283322 100644 --- a/plugins/examples/custom-settings/build.gradle +++ b/plugins/examples/custom-settings/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-settings' - description 'An example plugin showing how to register custom settings' - classname 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-settings' + description = 'An example plugin showing how to register custom settings' + classname = 'org.opensearch.example.customsettings.ExampleCustomSettingsPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.all { diff --git a/plugins/examples/custom-significance-heuristic/build.gradle b/plugins/examples/custom-significance-heuristic/build.gradle index ab013657fed23..72efbaafad8e3 100644 --- a/plugins/examples/custom-significance-heuristic/build.gradle +++ b/plugins/examples/custom-significance-heuristic/build.gradle @@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-significance-heuristic' - description 'An example plugin showing how to write and register a custom significance heuristic' - classname 'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-significance-heuristic' + description = 'An example plugin showing how to write and register a custom significance heuristic' + classname = 'org.opensearch.example.customsigheuristic.CustomSignificanceHeuristicPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } diff --git a/plugins/examples/custom-suggester/build.gradle b/plugins/examples/custom-suggester/build.gradle index d60523306b3c1..977cad7d1452e 100644 --- a/plugins/examples/custom-suggester/build.gradle +++ b/plugins/examples/custom-suggester/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'custom-suggester' - description 'An example plugin showing how to write and register a custom suggester' - classname 
'org.opensearch.example.customsuggester.CustomSuggesterPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'custom-suggester' + description = 'An example plugin showing how to write and register a custom suggester' + classname = 'org.opensearch.example.customsuggester.CustomSuggesterPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } testClusters.all { diff --git a/plugins/examples/painless-allowlist/build.gradle b/plugins/examples/painless-allowlist/build.gradle index 99722126dd171..d8b4c15536a75 100644 --- a/plugins/examples/painless-allowlist/build.gradle +++ b/plugins/examples/painless-allowlist/build.gradle @@ -31,12 +31,12 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'painless-allowlist' - description 'An example allowlisting additional classes and methods in painless' - classname 'org.opensearch.example.painlessallowlist.MyAllowlistPlugin' + name = 'painless-allowlist' + description = 'An example allowlisting additional classes and methods in painless' + classname = 'org.opensearch.example.painlessallowlist.MyAllowlistPlugin' extendedPlugins = ['lang-painless'] - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/examples/rescore/build.gradle b/plugins/examples/rescore/build.gradle index b33d79395d92b..ad450798514ea 100644 --- a/plugins/examples/rescore/build.gradle +++ b/plugins/examples/rescore/build.gradle @@ -31,9 +31,9 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'example-rescore' - description 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' - classname 'org.opensearch.example.rescore.ExampleRescorePlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'example-rescore' + description = 'An example plugin implementing rescore and verifying that plugins *can* implement rescore' + classname = 'org.opensearch.example.rescore.ExampleRescorePlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } diff --git a/plugins/examples/rest-handler/build.gradle b/plugins/examples/rest-handler/build.gradle index b97d091af9d08..c3c25b4b0a841 100644 --- a/plugins/examples/rest-handler/build.gradle +++ b/plugins/examples/rest-handler/build.gradle @@ -35,11 +35,11 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.java-rest-test' opensearchplugin { - name 'rest-handler' - description 'An example plugin showing how to register a REST handler' - classname 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'rest-handler' + description = 'An example plugin showing how to register a REST handler' + classname = 'org.opensearch.example.resthandler.ExampleRestHandlerPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } // No unit tests in this example @@ -47,7 +47,7 @@ test.enabled = false tasks.register("exampleFixture", 
org.opensearch.gradle.test.AntFixture) { dependsOn sourceSets.javaRestTest.runtimeClasspath - env 'CLASSPATH', "${-> project.sourceSets.javaRestTest.runtimeClasspath.asPath}" + env 'CLASSPATH', "${-> sourceSets.javaRestTest.runtimeClasspath.asPath}" executable = "${BuildParams.runtimeJavaHome}/bin/java" args 'org.opensearch.example.resthandler.ExampleFixture', baseDir, 'TEST' } diff --git a/plugins/examples/script-expert-scoring/build.gradle b/plugins/examples/script-expert-scoring/build.gradle index e4ddd97abbe4c..1a880e80d2e49 100644 --- a/plugins/examples/script-expert-scoring/build.gradle +++ b/plugins/examples/script-expert-scoring/build.gradle @@ -31,11 +31,11 @@ apply plugin: 'opensearch.opensearchplugin' apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name 'script-expert-scoring' - description 'An example script engine to use low level Lucene internals for expert scoring' - classname 'org.opensearch.example.expertscript.ExpertScriptPlugin' - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = 'script-expert-scoring' + description = 'An example script engine to use low level Lucene internals for expert scoring' + classname = 'org.opensearch.example.expertscript.ExpertScriptPlugin' + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } test.enabled = false diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle index 222443efcb214..2ea3e8e6b1e50 100644 --- a/plugins/identity-shiro/build.gradle +++ b/plugins/identity-shiro/build.gradle @@ -9,11 +9,11 @@ apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Plugin for identity features in OpenSearch.' - classname 'org.opensearch.identity.shiro.ShiroIdentityPlugin' - name project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + description = 'Plugin for identity features in OpenSearch.' + classname = 'org.opensearch.identity.shiro.ShiroIdentityPlugin' + name = project.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } dependencies { diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 2948ca12904f5..e0ad602266602 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -33,8 +33,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'Ingest processor that uses Apache Tika to extract contents' - classname 'org.opensearch.ingest.attachment.IngestAttachmentPlugin' + description = 'Ingest processor that uses Apache Tika to extract contents' + classname = 'org.opensearch.ingest.attachment.IngestAttachmentPlugin' } versions << [ diff --git a/plugins/mapper-annotated-text/build.gradle b/plugins/mapper-annotated-text/build.gradle index 5ff3bbe37810b..c7bc5b795ed71 100644 --- a/plugins/mapper-annotated-text/build.gradle +++ b/plugins/mapper-annotated-text/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' 
- classname 'org.opensearch.plugin.mapper.AnnotatedTextPlugin' + description = 'The Mapper Annotated_text plugin adds support for text fields with markup used to inject annotation tokens into the index.' + classname = 'org.opensearch.plugin.mapper.AnnotatedTextPlugin' } restResources { diff --git a/plugins/mapper-murmur3/build.gradle b/plugins/mapper-murmur3/build.gradle index 67006f29b7565..42e27d7b3908a 100644 --- a/plugins/mapper-murmur3/build.gradle +++ b/plugins/mapper-murmur3/build.gradle @@ -30,8 +30,8 @@ apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - description 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' - classname 'org.opensearch.plugin.mapper.MapperMurmur3Plugin' + description = 'The Mapper Murmur3 plugin allows to compute hashes of a field\'s values at index-time and to store them in the index.' + classname = 'org.opensearch.plugin.mapper.MapperMurmur3Plugin' } restResources { diff --git a/plugins/mapper-size/build.gradle b/plugins/mapper-size/build.gradle index fb4f7c4e00c4f..8c6caaf09e01a 100644 --- a/plugins/mapper-size/build.gradle +++ b/plugins/mapper-size/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Mapper Size plugin allows document to record their uncompressed size at index time.' - classname 'org.opensearch.plugin.mapper.MapperSizePlugin' + description = 'The Mapper Size plugin allows document to record their uncompressed size at index time.' + classname = 'org.opensearch.plugin.mapper.MapperSizePlugin' } restResources { diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index ad12ec9003e64..c6b303f22112e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -39,8 +39,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Azure Repository plugin adds support for Azure storage repositories.' - classname 'org.opensearch.repositories.azure.AzureRepositoryPlugin' + description = 'The Azure Repository plugin adds support for Azure storage repositories.' + classname = 'org.opensearch.repositories.azure.AzureRepositoryPlugin' } dependencies { diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 97ae88aac5485..d4c870e1ca2b2 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -43,8 +43,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The GCS repository plugin adds Google Cloud Storage support for repositories.' - classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' + description = 'The GCS repository plugin adds Google Cloud Storage support for repositories.' + classname = 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' } dependencies { diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index faa9b2bfff84d..441c6ae998406 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -43,8 +43,8 @@ apply plugin: 'opensearch.rest-resources' apply plugin: 'opensearch.rest-test' opensearchplugin { - description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' 
- classname 'org.opensearch.repositories.hdfs.HdfsPlugin' + description = 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' + classname = 'org.opensearch.repositories.hdfs.HdfsPlugin' } versions << [ @@ -133,11 +133,11 @@ project(':test:fixtures:krb5kdc-fixture').tasks.preProcessFixture { // Create HDFS File System Testing Fixtures for HA/Secure combinations for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { - def tsk = project.tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) { - dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture + def tsk = tasks.register(fixtureName, org.opensearch.gradle.test.AntFixture) { + dependsOn configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = "${BuildParams.runtimeJavaHome}/bin/java" - env 'CLASSPATH', "${-> project.configurations.hdfsFixture.asPath}" - maxWaitInSeconds 60 + env 'CLASSPATH', "${-> configurations.hdfsFixture.asPath}" + maxWaitInSeconds = 60 onlyIf { BuildParams.inFipsJvm == false } waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when @@ -187,7 +187,7 @@ Set disabledIntegTestTaskNames = [] for (String integTestTaskName : ['integTestHa', 'integTestSecure', 'integTestSecureHa']) { task "${integTestTaskName}"(type: RestIntegTestTask) { description = "Runs rest tests against an opensearch cluster with HDFS." - dependsOn(project.bundlePlugin) + dependsOn(bundlePlugin) if (disabledIntegTestTaskNames.contains(integTestTaskName)) { enabled = false; diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 398611a016ed2..6e84edddcc252 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -41,8 +41,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The S3 repository plugin adds S3 repositories' - classname 'org.opensearch.repositories.s3.S3RepositoryPlugin' + description = 'The S3 repository plugin adds S3 repositories' + classname = 'org.opensearch.repositories.s3.S3RepositoryPlugin' } dependencies { diff --git a/plugins/store-smb/build.gradle b/plugins/store-smb/build.gradle index add4abb22329f..d702978730f45 100644 --- a/plugins/store-smb/build.gradle +++ b/plugins/store-smb/build.gradle @@ -31,8 +31,8 @@ apply plugin: 'opensearch.yaml-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The Store SMB plugin adds support for SMB stores.' - classname 'org.opensearch.plugin.store.smb.SMBStorePlugin' + description = 'The Store SMB plugin adds support for SMB stores.' + classname = 'org.opensearch.plugin.store.smb.SMBStorePlugin' } restResources { restApi { diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 872d928aa093f..3aba7d64cd96d 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -14,8 +14,8 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'Opentelemetry based telemetry implementation.' - classname 'org.opensearch.telemetry.OTelTelemetryPlugin' + description = 'Opentelemetry based telemetry implementation.' 
+ classname = 'org.opensearch.telemetry.OTelTelemetryPlugin' hasClientJar = false } diff --git a/plugins/transport-grpc/build.gradle b/plugins/transport-grpc/build.gradle index 47f62b2b8c3f3..5c6bc8efe1098 100644 --- a/plugins/transport-grpc/build.gradle +++ b/plugins/transport-grpc/build.gradle @@ -9,8 +9,8 @@ import org.gradle.api.attributes.java.TargetJvmEnvironment */ opensearchplugin { - description 'gRPC based transport implementation' - classname 'org.opensearch.transport.grpc.GrpcPlugin' + description = 'gRPC based transport implementation' + classname = 'org.opensearch.transport.grpc.GrpcPlugin' } dependencies { diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 7132c97864238..6ac27b51f8902 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -34,8 +34,8 @@ apply plugin: "opensearch.publish" apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'The nio transport.' - classname 'org.opensearch.transport.nio.NioTransportPlugin' + description = 'The nio transport.' + classname = 'org.opensearch.transport.nio.NioTransportPlugin' hasClientJar = true } diff --git a/plugins/transport-reactor-netty4/build.gradle b/plugins/transport-reactor-netty4/build.gradle index 1e76d1a29efc1..12ae5ce99632e 100644 --- a/plugins/transport-reactor-netty4/build.gradle +++ b/plugins/transport-reactor-netty4/build.gradle @@ -23,8 +23,8 @@ apply plugin: 'opensearch.internal-cluster-test' apply plugin: 'opensearch.publish' opensearchplugin { - description 'Reactor Netty 4 based transport implementation' - classname 'org.opensearch.transport.reactor.ReactorNetty4Plugin' + description = 'Reactor Netty 4 based transport implementation' + classname = 'org.opensearch.transport.reactor.ReactorNetty4Plugin' hasClientJar = true } diff --git a/plugins/workload-management/build.gradle b/plugins/workload-management/build.gradle index ad6737bbd24b0..2e8b0df468092 100644 --- a/plugins/workload-management/build.gradle +++ b/plugins/workload-management/build.gradle @@ -14,8 +14,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.internal-cluster-test' opensearchplugin { - description 'OpenSearch Workload Management Plugin.' - classname 'org.opensearch.plugin.wlm.WorkloadManagementPlugin' + description = 'OpenSearch Workload Management Plugin.' 
+ classname = 'org.opensearch.plugin.wlm.WorkloadManagementPlugin' } dependencies { diff --git a/qa/die-with-dignity/build.gradle b/qa/die-with-dignity/build.gradle index db8762fe921bf..a3e5f295001bc 100644 --- a/qa/die-with-dignity/build.gradle +++ b/qa/die-with-dignity/build.gradle @@ -16,8 +16,8 @@ apply plugin: 'opensearch.java-rest-test' apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - description 'Die with dignity plugin' - classname 'org.opensearch.DieWithDignityPlugin' + description = 'Die with dignity plugin' + classname = 'org.opensearch.DieWithDignityPlugin' } // let the javaRestTest see the classpath of main diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index 82aa4cd511ef1..4b04fcea872b0 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -52,7 +52,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") @@ -62,7 +62,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#upgradedClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" dependsOn "${baseName}#oldClusterTest" doFirst { testClusters."${baseName}".goToNextVersion() diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 822977c55368a..9148f5a3ba3e6 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -69,7 +69,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { } tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") diff --git a/qa/multi-cluster-search/build.gradle b/qa/multi-cluster-search/build.gradle index 907791bd6a7de..a0a271fa01fb3 100644 --- a/qa/multi-cluster-search/build.gradle +++ b/qa/multi-cluster-search/build.gradle @@ -49,7 +49,7 @@ testClusters.'remote-cluster' { } task mixedClusterTest(type: RestIntegTestTask) { - useCluster testClusters.'remote-cluster' + useCluster project, testClusters.'remote-cluster' dependsOn 'remote-cluster' systemProperty 'tests.rest.suite', 'multi_cluster' } diff --git a/qa/remote-clusters/build.gradle b/qa/remote-clusters/build.gradle index 2f3cd9d2d898d..a52d4f2035bea 100644 --- a/qa/remote-clusters/build.gradle +++ b/qa/remote-clusters/build.gradle @@ -59,7 +59,7 @@ tasks.named("preProcessFixture").configure { } doLast { // tests expect to have an empty repo - project.delete( + delete( "${buildDir}/repo" ) createAndSetWritable( diff --git a/qa/repository-multi-version/build.gradle b/qa/repository-multi-version/build.gradle index 67710095d30bc..2bf18d02254ae 100644 --- a/qa/repository-multi-version/build.gradle +++ b/qa/repository-multi-version/build.gradle @@ -59,7 +59,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#Step1OldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${oldClusterName}" + useCluster project, testClusters."${oldClusterName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") 
@@ -68,19 +68,19 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#Step2NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" + useCluster project, testClusters."${newClusterName}" dependsOn "${baseName}#Step1OldClusterTest" systemProperty 'tests.rest.suite', 'step2' } tasks.register("${baseName}#Step3OldClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${oldClusterName}" + useCluster project, testClusters."${oldClusterName}" dependsOn "${baseName}#Step2NewClusterTest" systemProperty 'tests.rest.suite', 'step3' } tasks.register("${baseName}#Step4NewClusterTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${newClusterName}" + useCluster project, testClusters."${newClusterName}" dependsOn "${baseName}#Step3OldClusterTest" systemProperty 'tests.rest.suite', 'step4' } diff --git a/qa/rolling-upgrade/build.gradle b/qa/rolling-upgrade/build.gradle index 3dff452be855f..ffcf815bfa264 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -67,7 +67,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) { dependsOn processTestResources - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" mustRunAfter(precommit) doFirst { delete("${buildDir}/cluster/shared/repo/${baseName}") @@ -80,7 +80,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#oneThirdUpgradedTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oldClusterTest" - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" doFirst { testClusters."${baseName}".nextNodeToNextVersion() } @@ -93,7 +93,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { tasks.register("${baseName}#twoThirdsUpgradedTest", StandaloneRestIntegTestTask) { dependsOn "${baseName}#oneThirdUpgradedTest" - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" doFirst { testClusters."${baseName}".nextNodeToNextVersion() } @@ -109,7 +109,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.wireCompatible) { doFirst { testClusters."${baseName}".nextNodeToNextVersion() } - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" systemProperty 'tests.rest.suite', 'upgraded_cluster' systemProperty 'tests.upgrade_from_version', bwcVersionStr diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 25261f5e3ff7d..af389a7c59835 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -47,7 +47,7 @@ testClusters.integTest { integTest { doFirst { - project.delete(repo) + delete(repo) repo.mkdirs() } } diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 8b0dd20899862..18e4b5b549579 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -48,7 +48,7 @@ for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) { } tasks.register("${baseName}#integTest", StandaloneRestIntegTestTask) { - useCluster testClusters."${baseName}" + useCluster project, testClusters."${baseName}" nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}") nonInputProperties.systemProperty('tests.clustername', "${-> 
testClusters."${baseName}".getName()}") } diff --git a/sandbox/plugins/build.gradle b/sandbox/plugins/build.gradle index 61afb2c568e1b..1b7b6889972fd 100644 --- a/sandbox/plugins/build.gradle +++ b/sandbox/plugins/build.gradle @@ -12,8 +12,8 @@ configure(subprojects.findAll { it.parent.path == project.path }) { apply plugin: 'opensearch.opensearchplugin' opensearchplugin { - name project.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = project.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } } diff --git a/server/build.gradle b/server/build.gradle index 8dd23491ccd69..6559c7247200a 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -42,7 +42,7 @@ plugins { publishing { publications { nebula(MavenPublication) { - artifactId 'opensearch' + artifactId = 'opensearch' } } } diff --git a/test/external-modules/build.gradle b/test/external-modules/build.gradle index 8e59c309826e7..e575323b6248c 100644 --- a/test/external-modules/build.gradle +++ b/test/external-modules/build.gradle @@ -17,9 +17,9 @@ subprojects { apply plugin: 'opensearch.yaml-rest-test' opensearchplugin { - name it.name - licenseFile rootProject.file('licenses/APACHE-LICENSE-2.0.txt') - noticeFile rootProject.file('NOTICE.txt') + name = it.name + licenseFile = rootProject.file('licenses/APACHE-LICENSE-2.0.txt') + noticeFile = rootProject.file('NOTICE.txt') } tasks.named('yamlRestTest').configure { diff --git a/test/external-modules/delayed-aggs/build.gradle b/test/external-modules/delayed-aggs/build.gradle index d470269c8a6e2..a7662f72e64e6 100644 --- a/test/external-modules/delayed-aggs/build.gradle +++ b/test/external-modules/delayed-aggs/build.gradle @@ -29,8 +29,8 @@ */ opensearchplugin { - description 'A test module that allows to delay aggregations on shards with a configurable time' - classname 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin' + description = 'A test module that allows to delay aggregations on shards with a configurable time' + classname = 'org.opensearch.search.aggregations.DelayedShardAggregationPlugin' } restResources { diff --git a/test/fixtures/azure-fixture/build.gradle b/test/fixtures/azure-fixture/build.gradle index e2b1d475fbab7..904297a3b4c65 100644 --- a/test/fixtures/azure-fixture/build.gradle +++ b/test/fixtures/azure-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" diff --git a/test/fixtures/gcs-fixture/build.gradle b/test/fixtures/gcs-fixture/build.gradle index 564cf33687436..60f672e6bd00b 100644 --- a/test/fixtures/gcs-fixture/build.gradle +++ b/test/fixtures/gcs-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" diff --git a/test/fixtures/s3-fixture/build.gradle b/test/fixtures/s3-fixture/build.gradle index 86456b3364c4c..519e8514af4d4 100644 --- a/test/fixtures/s3-fixture/build.gradle +++ b/test/fixtures/s3-fixture/build.gradle @@ -46,7 +46,7 @@ preProcessFixture { } doLast { file("${testFixturesDir}/shared").mkdirs() - project.copy { + copy { from jar from configurations.runtimeClasspath into "${testFixturesDir}/shared" From 4d943993ac93e1a140c1b58c11e812a58578f27d Mon Sep 17 00:00:00 
2001 From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com> Date: Fri, 10 Jan 2025 19:36:47 +0100 Subject: [PATCH 19/37] `phone-search` analyzer: don't emit sip/tel prefix, int'l prefix, extension & unformatted input (#16993) * `phone-search` analyzer: don't emit int'l prefix this was an oversight in the initial implementation: if the tokenizer emits the international calling prefix in the search analyzer then all documents with the same international calling prefix will match. e.g. when searching for `+1-555-123-4567` not only documents with this number would match but also any other document with a `1` token (i.e. any other number with this prefix). thus the search functionality is currently broken for this analyzer, making it useless. the test coverage has now been extended to cover these and other use-cases. Signed-off-by: Ralph Ursprung * `phone-search` analyzer: don't emit extension & unformatted input if these tokens are emitted it meant that phone numbers with other international dialling prefixes still matched. e.g. searching for `+1 1234` would also match a number stored as `+2 1234`, which was wrong. the tokens still need to be emited for the `phone` analyzer, e.g. when the user only enters the extension / local number it should still match, the same is with the other ngrams: these are needed for search-as-you-type style queries where the user input needs to match against partial phone numbers. Signed-off-by: Ralph Ursprung * `phone-search` analyzer: don't emit sip/tel prefix in line with the previous two commits, this is something else the search analyzer shouldn't emit since otherwise searching for any number with such a prefix will match _any_ document with the same prefix. Signed-off-by: Ralph Ursprung --------- Signed-off-by: Ralph Ursprung --- CHANGELOG.md | 1 + .../phone/PhoneNumberTermTokenizer.java | 23 ++- .../phone/PhoneNumberAnalyzerTests.java | 18 +-- .../test/analysis-phone/20_search.yml | 139 ++++++++++++++++++ 4 files changed, 166 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 512ba48941c87..a57561da861ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -92,6 +92,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) +- The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) ### Security diff --git a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java index 6b95594204eb4..e0541755a2b3e 100644 --- a/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java +++ b/plugins/analysis-phonenumber/src/main/java/org/opensearch/analysis/phone/PhoneNumberTermTokenizer.java @@ -98,7 +98,9 @@ private Set getTokens() throws IOException { // Rip off the "tel:" or "sip:" prefix if (input.indexOf("tel:") == 0 || input.indexOf("sip:") == 0) { - tokens.add(input.substring(0, 4)); + if 
(addNgrams) { + tokens.add(input.substring(0, 4)); + } input = input.substring(4); } @@ -128,14 +130,23 @@ private Set getTokens() throws IOException { countryCode = Optional.of(String.valueOf(numberProto.getCountryCode())); input = String.valueOf(numberProto.getNationalNumber()); - // Add Country code, extension, and the number as tokens - tokens.add(countryCode.get()); + // add full number as tokens tokens.add(countryCode.get() + input); - if (!Strings.isEmpty(numberProto.getExtension())) { - tokens.add(numberProto.getExtension()); + + if (addNgrams) { + // Consider the country code as an ngram - it makes no sense in the search analyzer as it'd match all values with the + // same country code + tokens.add(countryCode.get()); + + // Add extension without country code (not done for search analyzer as that might match numbers in other countries as + // well!) + if (!Strings.isEmpty(numberProto.getExtension())) { + tokens.add(numberProto.getExtension()); + } + // Add unformatted input (most likely the same as the extension now since the prefix has been removed) + tokens.add(input); } - tokens.add(input); } } catch (final NumberParseException | StringIndexOutOfBoundsException e) { // Libphone didn't like it, no biggie. We'll just ngram the number as it is. diff --git a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java index 332f6d21f47d6..d55c0b2ce7d2a 100644 --- a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java +++ b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java @@ -87,11 +87,7 @@ public void testEuropeDetailled() throws IOException { * Test for all tokens which are emitted by the "phone" analyzer. 
*/ public void testEuropeDetailledSearch() throws IOException { - assertTokensAreInAnyOrder( - phoneSearchAnalyzer, - "tel:+441344840400", - Arrays.asList("tel:+441344840400", "tel:", "441344840400", "44", "1344840400") - ); + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+441344840400", Arrays.asList("tel:+441344840400", "441344840400")); } public void testEurope() throws IOException { @@ -166,6 +162,10 @@ public void testTelPrefix() throws IOException { assertTokensInclude("tel:+1228", Arrays.asList("1228", "122", "228")); } + public void testTelPrefixSearch() throws IOException { + assertTokensInclude("tel:+1228", Arrays.asList("1228")); + } + public void testNumberPrefix() throws IOException { assertTokensInclude("+1228", Arrays.asList("1228", "122", "228")); } @@ -189,21 +189,21 @@ public void testLocalNumberWithCH() throws IOException { } public void testSearchInternationalPrefixWithZZ() throws IOException { - assertTokensInclude(phoneSearchAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010")); } public void testSearchInternationalPrefixWithCH() throws IOException { - assertTokensInclude(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "+41583161010", Arrays.asList("+41583161010", "41583161010")); } public void testSearchNationalPrefixWithCH() throws IOException { // + is equivalent to 00 in Switzerland - assertTokensInclude(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0041583161010", Arrays.asList("0041583161010", "41583161010")); } public void testSearchLocalNumberWithCH() throws IOException { // when omitting the international prefix swiss numbers must start with '0' - assertTokensInclude(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("41", "41583161010", "583161010")); + assertTokensAreInAnyOrder(phoneSearchCHAnalyzer, "0583161010", Arrays.asList("0583161010", "41583161010")); } /** diff --git a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml index 0bd7d2c371bfc..1c51bfa3c5347 100644 --- a/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml +++ b/plugins/analysis-phonenumber/src/yamlRestTest/resources/rest-api-spec/test/analysis-phone/20_search.yml @@ -32,9 +32,37 @@ index: test id: 1 body: { "phone": "+41 58 316 10 10", "phone-ch": "058 316 10 10" } + - do: + index: + index: test + id: 2 + body: { "phone": "+41 58 316 99 99", "phone-ch": "058 316 99 99" } + - do: + index: + index: test + id: 3 + # number not used in the examples below, just present to make sure that it's never matched + body: { "phone": "+41 12 345 67 89", "phone-ch": "012 345 67 89" } + - do: + index: + index: test + id: 4 + # germany has a different phone number length, but for this test we ignore it and pretend they're the same + body: { "phone": "+49 58 316 10 10", "phone-ch": "+49 58 316 10 10" } + - do: + index: + index: test + id: 5 + body: { "phone": "+1-888-280-4331", "phone-ch": "+1-888-280-4331" } + - do: + index: + index: test + id: 6 + body: { "phone": "tel:+441344840400", "phone-ch": "tel:+441344840400" } - do: indices.refresh: {} + # international format in 
document & search will always work - do: search: rest_total_hits_as_int: true @@ -45,6 +73,7 @@ "phone": "+41583161010" - match: { hits.total: 1 } + # correct national format & international format in search will always work - do: search: rest_total_hits_as_int: true @@ -54,3 +83,113 @@ match: "phone-ch": "+41583161010" - match: { hits.total: 1 } + + # national format without country specified won't work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "0583161010" + - match: { hits.total: 0 } + + # correct national format with country specified in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "0583161010" + - match: { hits.total: 1 } + + # search-as-you-type style query + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+4158316" + - match: { hits.total: 2 } + + # search-as-you-type style query + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "058316" + - match: { hits.total: 2 } + + # international format in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+1 888 280 4331" + - match: { hits.total: 1 } + + # international format in document & search will always work + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone-ch": "+1 888 280 4331" + - match: { hits.total: 1 } + + # national format in search won't work if no country is specified + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "888 280 4331" + - match: { hits.total: 0 } + + # document & search have a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "tel:+441344840400" + - match: { hits.total: 1 } + + # only document has a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "+441344840400" + - match: { hits.total: 1 } + + # only search has a tel: prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match: + "phone": "tel:+1 888 280 4331" + - match: { hits.total: 1 } From 8191de85856d291507d09a7fd425908843ed8675 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 10 Jan 2025 11:32:25 -0800 Subject: [PATCH 20/37] Limit RW separation to remote store enabled clusters and update recovery flow (#16760) * Update search only replica recovery flow This PR includes multiple changes to search replica recovery. 1. Change search only replica copies to recover as empty store instead of PEER. This will run a store recovery that syncs segments from remote store directly and eliminate any primary communication. 2. Remove search replicas from the in-sync allocation ID set and update routing table to exclude them from allAllocationIds. This ensures primaries aren't tracking or validating the routing table for any search replica's presence. 3. Change search replica validation to require remote store. There are versions of the above changes that are still possible with primary based node-node replication, but I don't think they are worth making at this time. 
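As a rough illustration only (the settings below are placeholders drawn from the new tests in this patch, not an additional change), the flow now assumes a remote-store-enabled cluster with the experimental reader/writer split feature flag, and an index configured roughly like this:

import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.settings.Settings;
import org.opensearch.indices.replication.common.ReplicationType;

// Sketch: assumes a remote-store-enabled cluster with
// FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL enabled.
Settings indexSettings = Settings.builder()
    .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
    .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)           // writer copies
    .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)    // search-only copy
    .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
    .build();
// The search-only copy recovers from an empty store and syncs segments
// directly from the remote store, with no communication with the primary.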
Signed-off-by: Marc Handalian * more coverage Signed-off-by: Marc Handalian * add changelog entry Signed-off-by: Marc Handalian * add assertions that Search Replicas are not in the in-sync id set nor the AllAllocationIds set in the routing table Signed-off-by: Marc Handalian * update async task to only run if the FF is enabled and we are a remote store cluster. This check had previously only checked for segrep Signed-off-by: Marc Handalian * clean up max shards logic Signed-off-by: Marc Handalian * remove search replicas from check during renewPeerRecoveryRetentionLeases Signed-off-by: Marc Handalian * Revert "update async task to only run if the FF is enabled and we are a remote store cluster." reverting this, we already check for remote store earlier. This reverts commit 48ca1a3050d0f24757c70ae23a9d9e185cb3bc40. Signed-off-by: Marc Handalian * Add more tests for failover case Signed-off-by: Marc Handalian * Update remotestore restore logic and add test ensuring we can restore only writers when red Signed-off-by: Marc Handalian * Fix Search replicas to honor node level recovery limits Signed-off-by: Marc Handalian * Fix translog UUID mismatch on existing store recovery. This commit adds PR feedback and recovery tests post node restart. Signed-off-by: Marc Handalian * Fix spotless Signed-off-by: Marc Handalian * Fix bug with remote restore and add more tests Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- CHANGELOG.md | 1 + .../SearchReplicaFilteringAllocationIT.java | 3 +- ...SearchReplicaReplicationAndRecoveryIT.java | 325 ++++++++++++++++++ .../SearchReplicaReplicationIT.java | 134 -------- .../replication/SearchReplicaRestoreIT.java | 68 +--- .../indices/settings/SearchOnlyReplicaIT.java | 32 +- .../metadata/MetadataCreateIndexService.java | 9 +- .../MetadataUpdateSettingsService.java | 15 +- .../cluster/routing/IndexRoutingTable.java | 43 ++- .../routing/IndexShardRoutingTable.java | 17 + .../cluster/routing/ShardRouting.java | 6 +- .../allocation/IndexMetadataUpdater.java | 13 +- .../decider/ThrottlingAllocationDecider.java | 40 ++- .../index/seqno/ReplicationTracker.java | 1 + .../opensearch/index/shard/IndexShard.java | 21 +- .../index/shard/ReplicationGroup.java | 6 +- .../opensearch/index/shard/StoreRecovery.java | 15 +- .../metadata/SearchOnlyReplicaTests.java | 163 +++++---- .../SearchReplicaAllocationDeciderTests.java | 184 ++++++++++ .../gateway/ClusterStateUpdatersTests.java | 143 ++++++++ .../index/shard/IndexShardTests.java | 46 +++ .../cluster/routing/TestShardRouting.java | 22 ++ 22 files changed, 951 insertions(+), 356 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java delete mode 100644 server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a57561da861ee..a46359520e9e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -67,6 +67,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) - Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881)) +- Limit reader writer separation to remote 
store enabled clusters [#16760](https://github.com/opensearch-project/OpenSearch/pull/16760) ### Deprecated - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712)) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java index 5f65d6647f26d..df2620b794686 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/SearchReplicaFilteringAllocationIT.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.List; @@ -23,7 +24,7 @@ import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaFilteringAllocationIT extends OpenSearchIntegTestCase { +public class SearchReplicaFilteringAllocationIT extends RemoteStoreBaseIntegTestCase { @Override protected Settings featureFlagSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java new file mode 100644 index 0000000000000..7d4dd62cdca61 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationAndRecoveryIT.java @@ -0,0 +1,325 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryRequest; +import org.opensearch.action.admin.indices.recovery.RecoveryResponse; +import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.RecoverySource; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.SegmentReplicationPerGroupStats; +import org.opensearch.index.SegmentReplicationShardStats; +import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.junit.After; + +import java.nio.file.Path; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.routing.RecoverySource.Type.EMPTY_STORE; +import static org.opensearch.cluster.routing.RecoverySource.Type.EXISTING_STORE; +import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class SearchReplicaReplicationAndRecoveryIT extends SegmentReplicationBaseIT { + + private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); + } + + @After + public void teardown() { + clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); + + } + + @Override + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); + } + + public void testReplication() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + final int docCount = 10; + for (int i = 0; i < 
docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, primary, replica); + } + + public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final List nodes = internalCluster().startDataOnlyNodes(2); + createIndex( + INDEX_NAME, + Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .put("number_of_search_only_replicas", 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + ensureGreen(INDEX_NAME); + + final int docCount = 5; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + waitForSearchableDocs(docCount, nodes); + + SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() + .indices() + .prepareSegmentReplicationStats(INDEX_NAME) + .setDetailed(true) + .execute() + .actionGet(); + + // Verify the number of indices + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size()); + // Verify total shards + assertEquals(2, segmentReplicationStatsResponse.getTotalShards()); + // Verify the number of primary shards + assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size()); + + SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0); + Set replicaStats = perGroupStats.getReplicaStats(); + // Verify the number of replica stats + assertEquals(1, replicaStats.size()); + for (SegmentReplicationShardStats replicaStat : replicaStats) { + assertNotNull(replicaStat.getCurrentReplicationState()); + } + } + + public void testSearchReplicaRecovery() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + final String replica = internalCluster().startDataOnlyNode(); + + // ensure search replicas are only allocated to "replica" node. + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", replica)) + .execute() + .actionGet(); + + createIndex(INDEX_NAME); + ensureGreen(INDEX_NAME); + assertRecoverySourceType(replica, EMPTY_STORE); + + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + flush(INDEX_NAME); + waitForSearchableDocs(10, primary, replica); + + // Node stats should show remote download stats as nonzero, use this as a precondition to compare + // post restart. 
+ assertDownloadStats(replica, true); + NodesStatsResponse nodesStatsResponse; + NodeStats nodeStats; + + internalCluster().restartNode(replica); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + // assert existing store recovery + assertRecoverySourceType(replica, EXISTING_STORE); + assertDownloadStats(replica, false); + } + + public void testRecoveryAfterDocsIndexed() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + assertRecoverySourceType(replica, EMPTY_STORE); + // replica should have downloaded from remote + assertDownloadStats(replica, true); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0)) + .get(); + + ensureGreen(INDEX_NAME); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) + .get(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + internalCluster().restartNode(replica); + + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + assertRecoverySourceType(replica, EXISTING_STORE); + assertDownloadStats(replica, false); + } + + private static void assertRecoverySourceType(String replica, RecoverySource.Type recoveryType) throws InterruptedException, + ExecutionException { + RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(INDEX_NAME)).get(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get(INDEX_NAME)) { + if (recoveryState.getPrimary() == false) { + assertEquals("All SR should be of expected recovery type", recoveryType, recoveryState.getRecoverySource().getType()); + assertEquals("All SR should be on the specified node", replica, recoveryState.getTargetNode().getName()); + } + } + } + + private static void assertDownloadStats(String replica, boolean expectBytesDownloaded) throws InterruptedException, ExecutionException { + NodesStatsResponse nodesStatsResponse = client().admin().cluster().nodesStats(new NodesStatsRequest(replica)).get(); + assertEquals(1, nodesStatsResponse.getNodes().size()); + NodeStats nodeStats = nodesStatsResponse.getNodes().get(0); + assertEquals(replica, nodeStats.getNode().getName()); + if (expectBytesDownloaded) { + assertTrue(nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted() > 0); + } else { + assertEquals(0, nodeStats.getIndices().getSegments().getRemoteSegmentStats().getDownloadBytesStarted()); + } + } + + public void testStopPrimary_RestoreOnNewNode() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertDocCounts(docCount, primary); + + final String replica = 
internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(docCount, replica); + // stop the primary + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); + assertEquals(ClusterHealthStatus.RED, clusterHealthResponse.getStatus()); + }); + assertDocCounts(docCount, replica); + + String restoredPrimary = internalCluster().startDataOnlyNode(); + + client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture()); + ensureGreen(INDEX_NAME); + assertDocCounts(docCount, replica, restoredPrimary); + + for (int i = docCount; i < docCount * 2; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertBusy(() -> assertDocCounts(20, replica, restoredPrimary)); + } + + public void testFailoverToNewPrimaryWithPollingReplication() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + final String primary = internalCluster().startDataOnlyNode(); + createIndex(INDEX_NAME); + ensureYellowAndNoInitializingShards(INDEX_NAME); + final int docCount = 10; + for (int i = 0; i < docCount; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + + final String replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + assertDocCounts(10, replica); + + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)) + .get(); + final String writer_replica = internalCluster().startDataOnlyNode(); + ensureGreen(INDEX_NAME); + + // stop the primary + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); + assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); + }); + ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth(INDEX_NAME).get(); + assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); + assertDocCounts(10, replica); + + for (int i = docCount; i < docCount * 2; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); + } + refresh(INDEX_NAME); + assertBusy(() -> assertDocCounts(20, replica, writer_replica)); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java deleted file mode 100644 index f660695af9965..0000000000000 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaReplicationIT.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.indices.replication; - -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; -import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; -import org.opensearch.index.SegmentReplicationPerGroupStats; -import org.opensearch.index.SegmentReplicationShardStats; -import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; -import org.junit.Before; - -import java.nio.file.Path; -import java.util.List; -import java.util.Set; - -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaReplicationIT extends SegmentReplicationBaseIT { - - private static final String REPOSITORY_NAME = "test-remote-store-repo"; - protected Path absolutePath; - - private Boolean useRemoteStore; - - @Before - public void randomizeRemoteStoreEnabled() { - useRemoteStore = randomBoolean(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - if (useRemoteStore) { - if (absolutePath == null) { - absolutePath = randomRepoPath().toAbsolutePath(); - } - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) - .build(); - } - return super.nodeSettings(nodeOrdinal); - } - - @After - public void teardown() { - if (useRemoteStore) { - clusterAdmin().prepareCleanupRepository(REPOSITORY_NAME).get(); - } - } - - @Override - public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); - } - - public void testReplication() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final String primary = internalCluster().startDataOnlyNode(); - createIndex(INDEX_NAME); - ensureYellowAndNoInitializingShards(INDEX_NAME); - final String replica = internalCluster().startDataOnlyNode(); - ensureGreen(INDEX_NAME); - - final int docCount = 10; - for (int i = 0; i < docCount; i++) { - client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); - } - refresh(INDEX_NAME); - waitForSearchableDocs(docCount, primary, replica); - } - - public void testSegmentReplicationStatsResponseWithSearchReplica() throws Exception { - internalCluster().startClusterManagerOnlyNode(); - final List nodes = internalCluster().startDataOnlyNodes(2); - createIndex( - INDEX_NAME, - Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .put("number_of_search_only_replicas", 1) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build() - ); - ensureGreen(INDEX_NAME); - - final int docCount = 5; - for (int i = 0; i < docCount; i++) { - client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().get(); - } - refresh(INDEX_NAME); - waitForSearchableDocs(docCount, nodes); - - SegmentReplicationStatsResponse segmentReplicationStatsResponse = dataNodeClient().admin() - .indices() - .prepareSegmentReplicationStats(INDEX_NAME) - 
.setDetailed(true) - .execute() - .actionGet(); - - // Verify the number of indices - assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().size()); - // Verify total shards - assertEquals(2, segmentReplicationStatsResponse.getTotalShards()); - // Verify the number of primary shards - assertEquals(1, segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).size()); - - SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0); - Set replicaStats = perGroupStats.getReplicaStats(); - // Verify the number of replica stats - assertEquals(1, replicaStats.size()); - for (SegmentReplicationShardStats replicaStat : replicaStats) { - assertNotNull(replicaStat.getCurrentReplicationState()); - } - } -} diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java index 352332b962c92..e8d65e07c7dd9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SearchReplicaRestoreIT.java @@ -15,7 +15,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.remotestore.RemoteSnapshotIT; import org.opensearch.snapshots.SnapshotRestoreException; import org.opensearch.test.OpenSearchIntegTestCase; @@ -26,7 +26,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchReplicaRestoreIT extends AbstractSnapshotIntegTestCase { +public class SearchReplicaRestoreIT extends RemoteSnapshotIT { private static final String INDEX_NAME = "test-idx-1"; private static final String RESTORED_INDEX_NAME = INDEX_NAME + "-restored"; @@ -40,49 +40,6 @@ protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.READER_WRITER_SPLIT_EXPERIMENTAL, true).build(); } - public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnDocRepWithSearchReplica() throws Exception { - bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - SnapshotRestoreException exception = expectThrows( - SnapshotRestoreException.class, - () -> restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ) - ); - assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.DOCUMENT, ReplicationType.DOCUMENT))); - } - - public void testSearchReplicaRestore_WhenSnapshotOnDocRep_RestoreOnSegRepWithSearchReplica() throws Exception { - bootstrapIndexWithOutSearchReplicas(ReplicationType.DOCUMENT); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - 
.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ); - ensureYellowAndNoInitializingShards(RESTORED_INDEX_NAME); - internalCluster().startDataOnlyNode(); - ensureGreen(RESTORED_INDEX_NAME); - assertEquals(1, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); - - SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHitCount(resp, DOC_COUNT); - } - public void testSearchReplicaRestore_WhenSnapshotOnSegRep_RestoreOnDocRepWithSearchReplica() throws Exception { bootstrapIndexWithOutSearchReplicas(ReplicationType.SEGMENT); createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); @@ -140,27 +97,6 @@ public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_Resto assertTrue(exception.getMessage().contains(getSnapshotExceptionMessage(ReplicationType.SEGMENT, ReplicationType.DOCUMENT))); } - public void testSearchReplicaRestore_WhenSnapshotOnSegRepWithSearchReplica_RestoreOnDocRepWithNoSearchReplica() throws Exception { - bootstrapIndexWithSearchReplicas(); - createRepoAndSnapshot(REPOSITORY_NAME, FS_REPOSITORY_TYPE, SNAPSHOT_NAME, INDEX_NAME); - - restoreSnapshot( - REPOSITORY_NAME, - SNAPSHOT_NAME, - INDEX_NAME, - RESTORED_INDEX_NAME, - Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0) - .build() - ); - ensureGreen(RESTORED_INDEX_NAME); - assertEquals(0, getNumberOfSearchReplicas(RESTORED_INDEX_NAME)); - - SearchResponse resp = client().prepareSearch(RESTORED_INDEX_NAME).setQuery(QueryBuilders.matchAllQuery()).get(); - assertHitCount(resp, DOC_COUNT); - } - private void bootstrapIndexWithOutSearchReplicas(ReplicationType replicationType) throws InterruptedException { startCluster(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java index fa836e2cc5784..f524f4d1298c1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/settings/SearchOnlyReplicaIT.java @@ -20,6 +20,7 @@ import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -31,7 +32,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class SearchOnlyReplicaIT extends OpenSearchIntegTestCase { +public class SearchOnlyReplicaIT extends RemoteStoreBaseIntegTestCase { private static final String TEST_INDEX = "test_index"; @@ -55,35 +56,6 @@ public Settings indexSettings() { .build(); } - public void testCreateDocRepFails() { - Settings settings = Settings.builder().put(indexSettings()).put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); - - IllegalArgumentException illegalArgumentException = expectThrows( - IllegalArgumentException.class, - () -> createIndex(TEST_INDEX, settings) - ); - assertEquals(expectedFailureMessage, illegalArgumentException.getMessage()); - } - - public void testUpdateDocRepFails() { - Settings settings = Settings.builder() - 
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .build(); - // create succeeds - createIndex(TEST_INDEX, settings); - - // update fails - IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> { - client().admin() - .indices() - .prepareUpdateSettings(TEST_INDEX) - .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1)) - .get(); - }); - assertEquals(expectedFailureMessage, illegalArgumentException.getMessage()); - } - public void testFailoverWithSearchReplica_WithWriterReplicas() throws IOException { int numSearchReplicas = 1; int numWriterReplicas = 1; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 232201d18ba13..b5b2b71f977fa 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -1096,14 +1096,9 @@ static Settings aggregateIndexSettings( private static void updateSearchOnlyReplicas(Settings requestSettings, Settings.Builder builder) { if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.exists(builder) && builder.get(SETTING_NUMBER_OF_SEARCH_REPLICAS) != null) { if (INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings) > 0 - && ReplicationType.parseString(builder.get(INDEX_REPLICATION_TYPE_SETTING.getKey())).equals(ReplicationType.DOCUMENT)) { + && Boolean.parseBoolean(builder.get(SETTING_REMOTE_STORE_ENABLED)) == false) { throw new IllegalArgumentException( - "To set " - + SETTING_NUMBER_OF_SEARCH_REPLICAS - + ", " - + INDEX_REPLICATION_TYPE_SETTING.getKey() - + " must be set to " - + ReplicationType.SEGMENT + "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true" ); } builder.put(SETTING_NUMBER_OF_SEARCH_REPLICAS, INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings)); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java index 8c350d6b9cef5..a35af0e607c31 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataUpdateSettingsService.java @@ -63,7 +63,6 @@ import org.opensearch.index.IndexSettings; import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -77,8 +76,8 @@ import java.util.Set; import static org.opensearch.action.support.ContextPreservingActionListener.wrapPreservingContext; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateOverlap; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateRefreshIntervalSettings; import static org.opensearch.cluster.metadata.MetadataCreateIndexService.validateTranslogDurabilitySettings; @@ -538,14 +537,12 @@ public ClusterState 
execute(ClusterState currentState) { private void validateSearchReplicaCountSettings(Settings requestSettings, Index[] indices, ClusterState currentState) { final int updatedNumberOfSearchReplicas = IndexMetadata.INDEX_NUMBER_OF_SEARCH_REPLICAS_SETTING.get(requestSettings); if (updatedNumberOfSearchReplicas > 0) { - if (Arrays.stream(indices).allMatch(index -> currentState.metadata().isSegmentReplicationEnabled(index.getName())) == false) { + if (Arrays.stream(indices) + .allMatch( + index -> currentState.metadata().index(index.getName()).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false) + ) == false) { throw new IllegalArgumentException( - "To set " - + SETTING_NUMBER_OF_SEARCH_REPLICAS - + ", " - + INDEX_REPLICATION_TYPE_SETTING.getKey() - + " must be set to " - + ReplicationType.SEGMENT + "To set " + SETTING_NUMBER_OF_SEARCH_REPLICAS + ", " + SETTING_REMOTE_STORE_ENABLED + " must be set to true" ); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java index b4592659bb70f..08574dddc007c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java @@ -149,7 +149,10 @@ boolean validate(Metadata metadata) { "Shard [" + indexShardRoutingTable.shardId().id() + "] routing table has wrong number of replicas, expected [" + + "Replicas: " + indexMetadata.getNumberOfReplicas() + + "Search Replicas: " + + indexMetadata.getNumberOfSearchOnlyReplicas() + "], got [" + routingNumberOfReplicas + "]" @@ -514,15 +517,31 @@ public Builder initializeAsRemoteStoreRestore( ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) ); } + // if writers are red we do not want to re-recover search only shards if already assigned. + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.unassigned()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard(shardRouting); + } + } } else { // Primary is either active or initializing. Do not trigger restore. indexShardRoutingBuilder.addShard(indexShardRoutingTable.primaryShard()); // Replica, if unassigned, trigger peer recovery else no action. 
for (ShardRouting shardRouting : indexShardRoutingTable.replicaShards()) { if (shardRouting.unassigned()) { - indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) - ); + if (shardRouting.isSearchOnly()) { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) + ); + } else { + indexShardRoutingBuilder.addShard( + ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, unassignedInfo) + ); + } } else { indexShardRoutingBuilder.addShard(shardRouting); } @@ -575,13 +594,7 @@ private Builder initializeAsRestore( } for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned( - shardId, - false, - true, - PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled - unassignedInfo - ) + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) ); } shards.put(shardNumber, indexShardRoutingBuilder.build()); @@ -624,13 +637,7 @@ private Builder initializeEmpty(IndexMetadata indexMetadata, UnassignedInfo unas } for (int i = 0; i < indexMetadata.getNumberOfSearchOnlyReplicas(); i++) { indexShardRoutingBuilder.addShard( - ShardRouting.newUnassigned( - shardId, - false, - true, - PeerRecoverySource.INSTANCE, // TODO: Update to remote store if enabled - unassignedInfo - ) + ShardRouting.newUnassigned(shardId, false, true, EmptyStoreRecoverySource.INSTANCE, unassignedInfo) ); } shards.put(shardNumber, indexShardRoutingBuilder.build()); @@ -665,7 +672,7 @@ public Builder addSearchReplica() { shardId, false, true, - PeerRecoverySource.INSTANCE, // TODO: Change to remote store if enabled + EmptyStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null) ); shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build()); diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java index f25cb14f65eca..eb4177d7046ca 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/opensearch/cluster/routing/IndexShardRoutingTable.java @@ -134,6 +134,23 @@ public class IndexShardRoutingTable extends AbstractDiffable assignedShards = newShardRoutingTable.assignedShards() .stream() .filter(s -> s.isRelocationTarget() == false) + .filter(s -> s.isSearchOnly() == false) // do not consider search only shards for in sync validation .collect(Collectors.toList()); assert assignedShards.size() <= maxActiveShards : "cannot have more assigned shards " + assignedShards diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 4bde1e282fe78..32639bc3065da 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -191,7 +191,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing } } else { // Peer recovery - assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == 
RecoverySource.Type.PEER; + assert initializingShard(shardRouting, node.nodeId()).recoverySource().getType() == RecoverySource.Type.PEER + || shardRouting.isSearchOnly(); if (shardRouting.unassignedReasonIndexCreated()) { return allocateInitialShardCopies(shardRouting, node, allocation); @@ -204,7 +205,6 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { int currentInRecoveries = allocation.routingNodes().getInitialIncomingRecoveries(node.nodeId()); assert shardRouting.unassignedReasonIndexCreated() && !shardRouting.primary(); - return allocateShardCopies( shardRouting, allocation, @@ -212,7 +212,8 @@ private Decision allocateInitialShardCopies(ShardRouting shardRouting, RoutingNo replicasInitialRecoveries, this::getInitialPrimaryNodeOutgoingRecoveries, replicasInitialRecoveries, - true + true, + node ); } @@ -228,7 +229,8 @@ private Decision allocateNonInitialShardCopies(ShardRouting shardRouting, Routin concurrentIncomingRecoveries, this::getPrimaryNodeOutgoingRecoveries, concurrentOutgoingRecoveries, - false + false, + node ); } @@ -249,7 +251,8 @@ private Decision allocateShardCopies( int inRecoveriesLimit, BiFunction primaryNodeOutRecoveriesFunc, int outRecoveriesLimit, - boolean isInitialShardCopies + boolean isInitialShardCopies, + RoutingNode candidateNode ) { // Allocating a shard to this node will increase the incoming recoveries if (currentInRecoveries >= inRecoveriesLimit) { @@ -274,6 +277,16 @@ private Decision allocateShardCopies( ); } } else { + // if this is a search shard that recovers from remote store, ignore outgoing recovery limits. + if (shardRouting.isSearchOnly() && candidateNode.node().isRemoteStoreNode()) { + return allocation.decision( + YES, + NAME, + "Remote based search replica below incoming recovery limit: [%d < %d]", + currentInRecoveries, + inRecoveriesLimit + ); + } // search for corresponding recovery source (= primary shard) and check number of outgoing recoveries on that node ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); if (primaryShard == null) { @@ -319,6 +332,10 @@ private Decision allocateShardCopies( } } + private static boolean isRemoteStoreNode(ShardRouting shardRouting, RoutingAllocation allocation) { + return allocation.nodes().getNodes().get(shardRouting.currentNodeId()).isRemoteStoreNode(); + } + /** * The shard routing passed to {@link #canAllocate(ShardRouting, RoutingNode, RoutingAllocation)} is not the initializing shard to this * node but: @@ -357,9 +374,18 @@ private ShardRouting initializingShard(ShardRouting shardRouting, String current @Override public Decision canMoveAway(ShardRouting shardRouting, RoutingAllocation allocation) { int outgoingRecoveries = 0; - if (!shardRouting.primary() && !shardRouting.isSearchOnly()) { + if (!shardRouting.primary()) { ShardRouting primaryShard = allocation.routingNodes().activePrimary(shardRouting.shardId()); - outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId()); + if (primaryShard != null) { + outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(primaryShard.currentNodeId()); + } else { + assert shardRouting.isSearchOnly(); + // check if the moving away search replica is using remote store, if not + // throw an error as the primary it will use for recovery is not active. 
+ if (isRemoteStoreNode(shardRouting, allocation) == false) { + return allocation.decision(Decision.NO, NAME, "primary shard for this replica is not yet active"); + } + } } else { outgoingRecoveries = allocation.routingNodes().getOutgoingRecoveries(shardRouting.currentNodeId()); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 1e43827afeb47..57ade7fa10cd0 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -633,6 +633,7 @@ public synchronized void renewPeerRecoveryRetentionLeases() { */ final boolean renewalNeeded = StreamSupport.stream(routingTable.spliterator(), false) .filter(ShardRouting::assignedToNode) + .filter(r -> r.isSearchOnly() == false) .anyMatch(shardRouting -> { final RetentionLease retentionLease = retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)); if (retentionLease == null) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index f5de4dfb5a933..02f20504b07ba 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2540,22 +2540,24 @@ public void openEngineAndRecoverFromTranslog(boolean syncFromRemote) throws IOEx */ public void openEngineAndSkipTranslogRecovery() throws IOException { assert routingEntry().recoverySource().getType() == RecoverySource.Type.PEER : "not a peer recovery [" + routingEntry() + "]"; - recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); - loadGlobalCheckpointToReplicationTracker(); - innerOpenEngineAndTranslog(replicationTracker); - getEngine().translogManager().skipTranslogRecovery(); + openEngineAndSkipTranslogRecovery(true); } public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException { - assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery [" - + routingEntry() - + "]"; + assert routingEntry().isSearchOnly() || routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT + : "not a snapshot recovery [" + routingEntry() + "]"; recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); maybeCheckIndex(); recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + openEngineAndSkipTranslogRecovery(routingEntry().isSearchOnly()); + } + + void openEngineAndSkipTranslogRecovery(boolean syncFromRemote) throws IOException { recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); loadGlobalCheckpointToReplicationTracker(); - innerOpenEngineAndTranslog(replicationTracker, false); + innerOpenEngineAndTranslog(replicationTracker, syncFromRemote); + assert routingEntry().isSearchOnly() == false || translogStats().estimatedNumberOfOperations() == 0 + : "Translog is expected to be empty but holds " + translogStats().estimatedNumberOfOperations() + "Operations."; getEngine().translogManager().skipTranslogRecovery(); } @@ -2905,7 +2907,8 @@ public void recoverFromLocalShards( public void recoverFromStore(ActionListener listener) { // we are the first primary, recover from the gateway // if its post api allocation, the index should exists - assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; + assert shardRouting.primary() || shardRouting.isSearchOnly() + : "recover from 
store only makes sense if the shard is a primary shard or an untracked search only replica"; assert shardRouting.initializing() : "can only start recovery on initializing shard"; StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); storeRecovery.recoverFromStore(this, listener); diff --git a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java index ccfaf50da1c6b..b2db48737ee3f 100644 --- a/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java +++ b/server/src/main/java/org/opensearch/index/shard/ReplicationGroup.java @@ -67,15 +67,17 @@ public ReplicationGroup( this.inSyncAllocationIds = inSyncAllocationIds; this.trackedAllocationIds = trackedAllocationIds; this.version = version; - this.unavailableInSyncShards = Sets.difference(inSyncAllocationIds, routingTable.getAllAllocationIds()); this.replicationTargets = new ArrayList<>(); this.skippedShards = new ArrayList<>(); for (final ShardRouting shard : routingTable) { - // search only replicas never receive any replicated operations if (shard.unassigned() || shard.isSearchOnly()) { assert shard.primary() == false : "primary shard should not be unassigned in a replication group: " + shard; skippedShards.add(shard); + if (shard.isSearchOnly()) { + assert shard.allocationId() == null || inSyncAllocationIds.contains(shard.allocationId().getId()) == false + : " Search replicas should not be part of the inSync id set"; + } } else { if (trackedAllocationIds.contains(shard.allocationId().getId())) { replicationTargets.add(shard); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 6933e4e161dd1..74d9cc4b4f6dd 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -544,7 +544,7 @@ private boolean canRecover(IndexShard indexShard) { // got closed on us, just ignore this recovery return false; } - if (indexShard.routingEntry().primary() == false) { + if (indexShard.routingEntry().primary() == false && indexShard.routingEntry().isSearchOnly() == false) { throw new IndexShardRecoveryException(shardId, "Trying to recover when the shard is in backup state", null); } return true; @@ -747,7 +747,17 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe writeEmptyRetentionLeasesFile(indexShard); indexShard.recoveryState().getIndex().setFileDetailsComplete(); } - indexShard.openEngineAndRecoverFromTranslog(); + if (indexShard.routingEntry().isSearchOnly() == false) { + indexShard.openEngineAndRecoverFromTranslog(); + } else { + // Opens the engine for pull based replica copies that are + // not primary eligible. This will skip any checkpoint tracking and ensure + // that the shards are sync'd with remote store before opening. + // + // first bootstrap new history / translog so that the TranslogUUID matches the UUID from the latest commit. 
+ bootstrapForSnapshot(indexShard, store); + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } if (indexShard.shouldSeedRemoteStore()) { indexShard.getThreadPool().executor(ThreadPool.Names.GENERIC).execute(() -> { logger.info("Attempting to seed Remote Store via local recovery for {}", indexShard.shardId()); @@ -878,6 +888,7 @@ private void bootstrap(final IndexShard indexShard, final Store store) throws IO store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), localCheckpoint, diff --git a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java index 3d11193a07884..81055e01d915b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/SearchOnlyReplicaTests.java @@ -19,32 +19,46 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.common.ValidationException; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.env.Environment; +import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.cluster.ClusterStateChanges; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public class SearchOnlyReplicaTests extends OpenSearchSingleNodeTestCase { + public static final String TEST_RS_REPO = "test-rs-repo"; + public static final String 
INDEX_NAME = "test-index"; private ThreadPool threadPool; @Before @@ -70,7 +84,7 @@ protected Settings featureFlagSettings() { public void testCreateWithDefaultSearchReplicasSetting() { final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); ClusterState state = createIndexWithSettings(cluster, Settings.builder().build()); - IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0); + IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index(INDEX_NAME).getShards().get(0); assertEquals(1, indexShardRoutingTable.replicaShards().size()); assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size()); assertEquals(1, indexShardRoutingTable.writerReplicas().size()); @@ -91,53 +105,50 @@ public void testSearchReplicasValidationWithDocumentReplication() { ) ); assertEquals( - "To set index.number_of_search_only_replicas, index.replication.type must be set to SEGMENT", + "To set index.number_of_search_only_replicas, index.remote_store.enabled must be set to true", exception.getCause().getMessage() ); } - public void testUpdateSearchReplicaCount() { - final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); + public void testUpdateSearchReplicaCount() throws ExecutionException, InterruptedException { + Settings settings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) + .build(); + createIndex(INDEX_NAME, settings); - ClusterState state = createIndexWithSettings( - cluster, - Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ); - assertTrue(state.metadata().hasIndex("index")); - rerouteUntilActive(state, cluster); - IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0); + IndexShardRoutingTable indexShardRoutingTable = getIndexShardRoutingTable(); assertEquals(1, indexShardRoutingTable.replicaShards().size()); assertEquals(1, indexShardRoutingTable.searchOnlyReplicas().size()); assertEquals(0, indexShardRoutingTable.writerReplicas().size()); // add another replica - state = cluster.updateSettings( - state, - new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build()) + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 2).build() ); - rerouteUntilActive(state, cluster); - indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0); + client().admin().indices().updateSettings(updateSettingsRequest).get(); + indexShardRoutingTable = getIndexShardRoutingTable(); assertEquals(2, indexShardRoutingTable.replicaShards().size()); assertEquals(2, indexShardRoutingTable.searchOnlyReplicas().size()); assertEquals(0, indexShardRoutingTable.writerReplicas().size()); // remove all replicas - state = cluster.updateSettings( - state, - new UpdateSettingsRequest("index").settings(Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build()) + updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0).build() ); - 
rerouteUntilActive(state, cluster); - indexShardRoutingTable = state.getRoutingTable().index("index").getShards().get(0); + client().admin().indices().updateSettings(updateSettingsRequest).get(); + indexShardRoutingTable = getIndexShardRoutingTable(); assertEquals(0, indexShardRoutingTable.replicaShards().size()); assertEquals(0, indexShardRoutingTable.searchOnlyReplicas().size()); assertEquals(0, indexShardRoutingTable.writerReplicas().size()); } + private IndexShardRoutingTable getIndexShardRoutingTable() { + return client().admin().cluster().prepareState().get().getState().getRoutingTable().index(INDEX_NAME).getShards().get(0); + } + private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settings settings) { List allNodes = new ArrayList<>(); // node for primary/local @@ -149,48 +160,32 @@ private ClusterState createIndexWithSettings(ClusterStateChanges cluster, Settin } ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); - CreateIndexRequest request = new CreateIndexRequest("index", settings).waitForActiveShards(ActiveShardCount.NONE); + CreateIndexRequest request = new CreateIndexRequest(INDEX_NAME, settings).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); return state; } public void testUpdateSearchReplicasOverShardLimit() { - final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); - - List allNodes = new ArrayList<>(); - // node for primary/local - DiscoveryNode localNode = createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE); - allNodes.add(localNode); - - allNodes.add(createNode(Version.CURRENT, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)); - - ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); + Settings settings = Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 0) + .build(); + createIndex(INDEX_NAME, settings); + Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY); - CreateIndexRequest request = new CreateIndexRequest( - "index", - Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .put(INDEX_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(SETTING_NUMBER_OF_SEARCH_REPLICAS, 1) - .build() - ).waitForActiveShards(ActiveShardCount.NONE); - state = cluster.createIndex(state, request); - assertTrue(state.metadata().hasIndex("index")); - rerouteUntilActive(state, cluster); + UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(INDEX_NAME).settings( + Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build() + ); // add another replica - ClusterState finalState = state; - Integer maxShardPerNode = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getDefault(Settings.EMPTY); - expectThrows( - RuntimeException.class, - () -> cluster.updateSettings( - finalState, - new UpdateSettingsRequest("index").settings( - Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build() - ) - ) + ExecutionException executionException = expectThrows( + ExecutionException.class, + () -> client().admin().indices().updateSettings(updateSettingsRequest).get() ); + 
Throwable cause = executionException.getCause(); + assertEquals(ValidationException.class, cause.getClass()); } public void testUpdateSearchReplicasOnDocrepCluster() { @@ -206,7 +201,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() { ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); CreateIndexRequest request = new CreateIndexRequest( - "index", + INDEX_NAME, Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, 1) .put(SETTING_NUMBER_OF_REPLICAS, 0) @@ -214,7 +209,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() { .build() ).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); - assertTrue(state.metadata().hasIndex("index")); + assertTrue(state.metadata().hasIndex(INDEX_NAME)); rerouteUntilActive(state, cluster); // add another replica @@ -224,7 +219,7 @@ public void testUpdateSearchReplicasOnDocrepCluster() { RuntimeException.class, () -> cluster.updateSettings( finalState, - new UpdateSettingsRequest("index").settings( + new UpdateSettingsRequest(INDEX_NAME).settings( Settings.builder().put(SETTING_NUMBER_OF_SEARCH_REPLICAS, maxShardPerNode * 2).build() ) ) @@ -232,11 +227,51 @@ public void testUpdateSearchReplicasOnDocrepCluster() { } + Path tempDir = createTempDir(); + Path repo = tempDir.resolve("repo"); + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(buildRemoteStoreNodeAttributes(TEST_RS_REPO, repo)) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), repo) + .build(); + } + + private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) { + String repoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String repoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." 
+ REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), false) + .build(); + } + private static void rerouteUntilActive(ClusterState state, ClusterStateChanges cluster) { - while (state.routingTable().index("index").shard(0).allShardsStarted() == false) { + while (state.routingTable().index(INDEX_NAME).shard(0).allShardsStarted() == false) { state = cluster.applyStartedShards( state, - state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING) + state.routingTable().index(INDEX_NAME).shard(0).shardsWithState(ShardRoutingState.INITIALIZING) ); state = cluster.reroute(state, new ClusterRerouteRequest()); } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java index 8d4f4cdee26cc..9604e82fe4c88 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/SearchReplicaAllocationDeciderTests.java @@ -8,27 +8,44 @@ package org.opensearch.cluster.routing.allocation.decider; +import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.EmptyClusterInfoService; import org.opensearch.cluster.OpenSearchAllocationTestCase; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.routing.allocation.RoutingAllocation; import org.opensearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.opensearch.cluster.routing.allocation.command.AllocationCommands; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import static org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static org.opensearch.cluster.routing.allocation.decider.SearchReplicaAllocationDecider.SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static 
org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; public class SearchReplicaAllocationDeciderTests extends OpenSearchAllocationTestCase { @@ -130,4 +147,171 @@ public void testSearchReplicaRoutingDedicatedIncludes() { decision = (Decision.Single) filterAllocationDecider.canRemain(primary, state.getRoutingNodes().node("node1"), allocation); assertEquals(decision.toString(), Decision.Type.YES, decision.type()); } + + public void testSearchReplicaWithThrottlingDecider_PrimaryBasedReplication() { + TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator(); + // throttle outgoing on primary + AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator); + + Set> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .numberOfSearchReplicas(1) + ) + .build(); + + ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newNode("node1"), metadata, gatewayAllocator); + clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size()); + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); + // start a third node, we will try and move the SR to this node + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + // remove the primary and reroute - this would throw an NPE for search replicas but *not* regular. + // regular replicas would get promoted to primary before the CanMoveAway call. 
+ clusterState = strategy.disassociateDeadNodes( + ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(), + true, + "test" + ); + + // attempt to move the replica + AllocationService.CommandsResult commandsResult = strategy.reroute( + clusterState, + new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), + true, + false + ); + + assertEquals(commandsResult.explanations().explanations().size(), 1); + assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO); + boolean isCorrectNoDecision = false; + for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) { + if (decision.label().equals(ThrottlingAllocationDecider.NAME)) { + assertEquals("primary shard for this replica is not yet active", decision.getExplanation()); + assertEquals(Decision.Type.NO, decision.type()); + isCorrectNoDecision = true; + } + } + assertTrue(isCorrectNoDecision); + } + + public void testSearchReplicaWithThrottlingDeciderWithoutPrimary_RemoteStoreEnabled() { + TestGatewayAllocator gatewayAllocator = new TestGatewayAllocator(); + AllocationService strategy = createAllocationService(Settings.EMPTY, gatewayAllocator); + Set> settings = new HashSet<>(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + settings.add(SEARCH_REPLICA_ROUTING_INCLUDE_GROUP_SETTING); + Metadata metadata = Metadata.builder() + .put( + IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(0) + .numberOfSearchReplicas(1) + ) + .build(); + + ClusterState clusterState = initializeClusterStateWithSingleIndexAndShard(newRemoteNode("node1"), metadata, gatewayAllocator); + + clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + DiscoveryNode node2 = newRemoteNode("node2"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node2)).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertEquals(2, clusterState.routingTable().shardsWithState(STARTED).size()); + assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); + // start a third node, we will try and move the SR to this node + DiscoveryNode node3 = newRemoteNode("node3"); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(node3)).build(); + clusterState = strategy.reroute(clusterState, "reroute"); + // remove the primary and reroute - this would throw an NPE for search replicas but *not* regular. + // regular replicas would get promoted to primary before the CanMoveAway call. 
+ clusterState = strategy.disassociateDeadNodes( + ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(), + true, + "test" + ); + + // attempt to move the replica + AllocationService.CommandsResult commandsResult = strategy.reroute( + clusterState, + new AllocationCommands(new MoveAllocationCommand("test", 0, "node2", "node3")), + true, + false + ); + + assertEquals(commandsResult.explanations().explanations().size(), 1); + assertEquals(commandsResult.explanations().explanations().get(0).decisions().type(), Decision.Type.NO); + boolean foundYesMessage = false; + for (Decision decision : commandsResult.explanations().explanations().get(0).decisions().getDecisions()) { + if (decision.label().equals(ThrottlingAllocationDecider.NAME)) { + assertEquals("Remote based search replica below incoming recovery limit: [0 < 2]", decision.getExplanation()); + assertEquals(Decision.Type.YES, decision.type()); + foundYesMessage = true; + } + } + assertTrue(foundYesMessage); + } + + private ClusterState initializeClusterStateWithSingleIndexAndShard( + DiscoveryNode primaryNode, + Metadata metadata, + TestGatewayAllocator gatewayAllocator + ) { + Metadata.Builder metadataBuilder = new Metadata.Builder(metadata); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + IndexMetadata indexMetadata = metadata.index("test"); + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetadata); + initializePrimaryAndMarkInSync(indexMetadata.getIndex(), indexMetadataBuilder, gatewayAllocator, primaryNode); + routingTableBuilder.addAsRecovery(indexMetadata); + metadataBuilder.put(indexMetadata, false); + RoutingTable routingTable = routingTableBuilder.build(); + return ClusterState.builder(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .nodes(DiscoveryNodes.builder().add(primaryNode)) + .metadata(metadataBuilder.build()) + .routingTable(routingTable) + .build(); + } + + private void initializePrimaryAndMarkInSync( + Index index, + IndexMetadata.Builder indexMetadata, + TestGatewayAllocator gatewayAllocator, + DiscoveryNode primaryNode + ) { + final ShardRouting unassigned = ShardRouting.newUnassigned( + new ShardId(index, 0), + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + ShardRouting started = ShardRoutingHelper.moveToStarted(ShardRoutingHelper.initialize(unassigned, primaryNode.getId())); + indexMetadata.putInSyncAllocationIds(0, Collections.singleton(started.allocationId().getId())); + gatewayAllocator.addKnownAllocation(started); + } + + private static DiscoveryNode newRemoteNode(String name) { + return newNode( + name, + name, + Map.of( + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, + "cluster-repo", + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + "segment-repo", + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + "translog-repo" + ) + ); + } } diff --git a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java index dd2fb51151a5b..d85ed10eeeae7 100644 --- a/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/opensearch/gateway/ClusterStateUpdatersTests.java @@ -44,6 +44,7 @@ import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.RoutingTable; +import 
org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; @@ -489,4 +490,146 @@ public void testHideStateIfNotRecovered() { assertFalse(hiddenState.blocks().hasIndexBlock(indexMetadata.getIndex().getName(), IndexMetadata.INDEX_READ_ONLY_BLOCK)); } + public void testRemoteRestoreWithSearchOnlyShards() { + final int numOfShards = 10; + final int numAssignedSearchReplicas = 5; + final int numOfSearchReplicas = 1; + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas) + .build() + ); + // create an initial routing table where all search replicas exist and are assigned, they should get included as is in the restored + // routing. + final Index index = remoteMetadata.getIndex(); + + Map routingTable = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + final String nodeId = "node " + shardNumber; + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder( + new ShardId(remoteMetadata.getIndex(), shardId.id()) + ); + // add a search replica for the shard + ShardRouting searchReplicaRouting = ShardRouting.newUnassigned( + shardId, + false, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + if (shardNumber < numAssignedSearchReplicas) { + // first five shards add the SR as assigned + builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L)); + } else { + builder.addShard(searchReplicaRouting); + } + routingTable.put(shardId, builder.build()); + } + IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + routingTable, + true + ); + for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) { + assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size()); + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.shardId().getId() < numAssignedSearchReplicas) { + assertTrue(shardRouting.assignedToNode()); + assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } else { + assertTrue(shardRouting.unassigned()); + assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } + } + } + } + + private boolean containsSameRouting(IndexShardRoutingTable oldRoutingTable, ShardRouting shardRouting) { + return oldRoutingTable.searchOnlyReplicas().stream().anyMatch(r -> r.isSameAllocation(shardRouting)); + } + + public void testRemoteRestoreWithActivePrimaryAndSearchOnlyShards() { + final int numOfShards = 10; + final int numAssignedSearchReplicas = 5; + final int numOfSearchReplicas = 1; + + final IndexMetadata remoteMetadata = createIndexMetadata( + "test-remote", + Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, 
true) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_SEARCH_REPLICAS, numOfSearchReplicas) + .build() + ); + // create an initial routing table where all search replicas exist and are assigned, they should get included as is in the restored + // routing. + final Index index = remoteMetadata.getIndex(); + + Map routingTable = new HashMap<>(); + for (int shardNumber = 0; shardNumber < remoteMetadata.getNumberOfShards(); shardNumber++) { + ShardId shardId = new ShardId(index, shardNumber); + final String nodeId = "node " + shardNumber; + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder( + new ShardId(remoteMetadata.getIndex(), shardId.id()) + ); + // add the primary as assigned + ShardRouting primary = ShardRouting.newUnassigned( + shardId, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + builder.addShard(primary.initialize(nodeId + " Primary", null, 0L)); + + // add a search replica for the shard + ShardRouting searchReplicaRouting = ShardRouting.newUnassigned( + shardId, + false, + true, + RecoverySource.EmptyStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") + ); + if (shardNumber < numAssignedSearchReplicas) { + // first five shards add the SR as assigned + builder.addShard(searchReplicaRouting.initialize(nodeId, null, 0L)); + } else { + builder.addShard(searchReplicaRouting); + } + routingTable.put(shardId, builder.build()); + } + IndexRoutingTable.Builder routingTableAfterRestore = new IndexRoutingTable.Builder(remoteMetadata.getIndex()) + .initializeAsRemoteStoreRestore( + remoteMetadata, + new RecoverySource.RemoteStoreRecoverySource( + UUIDs.randomBase64UUID(), + remoteMetadata.getCreationVersion(), + new IndexId(remoteMetadata.getIndex().getName(), remoteMetadata.getIndexUUID()) + ), + routingTable, + false + ); + for (IndexShardRoutingTable indexShardRoutingTable : routingTableAfterRestore.build()) { + assertEquals(numOfSearchReplicas, indexShardRoutingTable.searchOnlyReplicas().size()); + for (ShardRouting shardRouting : indexShardRoutingTable.searchOnlyReplicas()) { + if (shardRouting.shardId().getId() < numAssignedSearchReplicas) { + assertTrue(shardRouting.assignedToNode()); + assertTrue(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } else { + assertTrue(shardRouting.unassigned()); + assertFalse(containsSameRouting(routingTable.get(indexShardRoutingTable.getShardId()), shardRouting)); + } + } + } + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 96794a83ef762..535adfbff8dcc 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -3011,6 +3011,52 @@ public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOExcep closeShards(target); } + public void testRestoreSearchOnlyShardFromStore() throws IOException { + // this test indexes docs on a primary, refreshes, then recovers a new Search Replica and asserts + // all docs are present + String remoteStorePath = createTempDir().toString(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + 
.put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") + .build(); + IndexShard primary = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primary, "_doc", "1"); + indexDoc(primary, "_doc", "2"); + primary.refresh("test"); + assertDocs(primary, "1", "2"); + + ShardRouting searchReplicaShardRouting = TestShardRouting.newShardRouting( + primary.shardId, + randomAlphaOfLength(10), + false, + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + IndexShard replica = newShard(searchReplicaShardRouting, settings, new NRTReplicationEngineFactory()); + recoverShardFromStore(replica); + searchReplicaShardRouting = replica.routingEntry(); + assertDocs(replica, "1", "2"); + assertEquals( + primary.getLatestReplicationCheckpoint().getSegmentInfosVersion(), + replica.getLatestReplicationCheckpoint().getSegmentInfosVersion() + ); + + // move to unassigned while the replica is active, then reinit from existing store. + searchReplicaShardRouting = ShardRoutingHelper.moveToUnassigned( + searchReplicaShardRouting, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so") + ); + searchReplicaShardRouting = ShardRoutingHelper.initialize(searchReplicaShardRouting, replica.routingEntry().currentNodeId()); + assertEquals(RecoverySource.ExistingStoreRecoverySource.INSTANCE, searchReplicaShardRouting.recoverySource()); + replica = reinitShard(replica, searchReplicaShardRouting); + recoverShardFromStore(replica); + assertDocs(replica, "1", "2"); + closeShards(primary, replica); + } + public void testReaderWrapperIsUsed() throws IOException { IndexShard shard = newStartedShard(true); indexDoc(shard, "_doc", "0", "{\"foo\" : \"bar\"}"); diff --git a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java index 9a000a4eeda72..a6af658be2ca1 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/opensearch/cluster/routing/TestShardRouting.java @@ -342,4 +342,26 @@ public static ShardRouting newShardRouting( -1 ); } + + public static ShardRouting newShardRouting( + ShardId shardId, + String currentNodeId, + boolean primary, + boolean searchOnly, + ShardRoutingState state, + RecoverySource recoverySource + ) { + return new ShardRouting( + shardId, + currentNodeId, + null, + primary, + searchOnly, + state, + recoverySource, + buildUnassignedInfo(state), + buildAllocationId(state), + -1 + ); + } } From 5afb92fc06b5dc68110ccddd49b3ef1468734963 Mon Sep 17 00:00:00 2001 From: "Samuel.G" <1148690954@qq.com> Date: Sat, 11 Jan 2025 06:22:30 +0900 Subject: [PATCH 21/37] Fix case insensitive and escaped query on wildcard (#16827) * fix case insensitive and escaped query on wildcard Signed-off-by: gesong.samuel * add changelog Signed-off-by: gesong.samuel --------- Signed-off-by: gesong.samuel Signed-off-by: Michael Froh Co-authored-by: gesong.samuel Co-authored-by: Michael Froh --- CHANGELOG.md | 1 + .../search/270_wildcard_fieldtype_queries.yml | 127 +++++++++++++++++- .../index/mapper/WildcardFieldMapper.java | 116 ++++++++++++---- 3 files changed, 213 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a46359520e9e1..20e6c03d5a9d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ The format is 
based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bound the size of cache in deprecation logger ([16702](https://github.com/opensearch-project/OpenSearch/issues/16702)) - Ensure consistency of system flag on IndexMetadata after diff is applied ([#16644](https://github.com/opensearch-project/OpenSearch/pull/16644)) - Skip remote-repositories validations for node-joins when RepositoriesService is not in sync with cluster-state ([#16763](https://github.com/opensearch-project/OpenSearch/pull/16763)) +- Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml index d92538824232d..a85399feefd25 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/270_wildcard_fieldtype_queries.yml @@ -62,6 +62,19 @@ setup: id: 7 body: my_field: "ABCD" + - do: + index: + index: test + id: 8 + body: + my_field: "*" + + - do: + index: + index: test + id: 9 + body: + my_field: "\\*" - do: indices.refresh: {} @@ -223,7 +236,7 @@ setup: wildcard: my_field: value: "*" - - match: { hits.total.value: 6 } + - match: { hits.total.value: 8 } --- "regexp match-all works": - do: @@ -234,7 +247,7 @@ setup: regexp: my_field: value: ".*" - - match: { hits.total.value: 6 } + - match: { hits.total.value: 8 } --- "terms query on wildcard field matches": - do: @@ -270,3 +283,113 @@ setup: - match: { hits.total.value: 2 } - match: { hits.hits.0._id: "5" } - match: { hits.hits.1._id: "7" } +--- +"case insensitive regexp query on wildcard field": + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: "AbCd" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "5" } + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: "AbCd" + case_insensitive: true + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "5" } + - match: { hits.hits.1._id: "7" } +--- +"wildcard query works on values contains escaped characters": + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "\\*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "8" } + + - do: + search: + index: test + body: + query: + wildcard: + my_field: + value: "\\\\\\*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "9" } +--- +"regexp query works on values contains escaped characters": + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: "\\*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "8" } + + - do: + search: + index: test + body: + query: + regexp: + my_field: + value: "\\\\\\*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "9"} +--- +"term query contains escaped characters": + - do: + search: + index: test + body: + query: + term: + my_field: "\\*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "9" } + + 
- do: + search: + index: test + body: + query: + term: + my_field: "*" + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "8"} +--- +"terms query contains escaped characters": + - do: + search: + index: test + body: + query: + terms: { my_field: ["*"] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "8" } + + - do: + search: + index: test + body: + query: + terms: { my_field: [ "\\*" ] } + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "9" } diff --git a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java index e43e3bda692e7..7342c6f9f23bd 100644 --- a/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/WildcardFieldMapper.java @@ -327,6 +327,25 @@ public boolean incrementToken() throws IOException { * Implements the various query types over wildcard fields. */ public static final class WildcardFieldType extends StringFieldType { + private static final Set WILDCARD_SPECIAL = Set.of('?', '*', '\\'); + private static final Set REGEXP_SPECIAL = Set.of( + '.', + '^', + '$', + '*', + '+', + '?', + '(', + ')', + '[', + ']', + '{', + '}', + '|', + '/', + '\\' + ); + private final int ignoreAbove; private final String nullValue; @@ -438,7 +457,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo if (caseInsensitive) { s = s.toLowerCase(Locale.ROOT); } - return s.equals(finalValue); + return s.equals(performEscape(finalValue, false)); }; } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.ALL) { return existsQuery(context); @@ -454,7 +473,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo }; } - Set requiredNGrams = getRequiredNGrams(finalValue); + Set requiredNGrams = getRequiredNGrams(finalValue, false); Query approximation; if (requiredNGrams.isEmpty()) { // This only happens when all characters are wildcard characters (* or ?), @@ -471,7 +490,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo } // Package-private for testing - static Set getRequiredNGrams(String value) { + static Set getRequiredNGrams(String value, boolean regexpMode) { Set terms = new HashSet<>(); if (value.isEmpty()) { @@ -484,7 +503,7 @@ static Set getRequiredNGrams(String value) { if (!value.startsWith("?") && !value.startsWith("*")) { // Can add prefix term rawSequence = getNonWildcardSequence(value, 0); - currentSequence = performEscape(rawSequence); + currentSequence = performEscape(rawSequence, regexpMode); if (currentSequence.length() == 1) { terms.add(new String(new char[] { 0, currentSequence.charAt(0) })); } else { @@ -496,7 +515,7 @@ static Set getRequiredNGrams(String value) { } while (pos < value.length()) { boolean isEndOfValue = pos + rawSequence.length() == value.length(); - currentSequence = performEscape(rawSequence); + currentSequence = performEscape(rawSequence, regexpMode); if (!currentSequence.isEmpty() && currentSequence.length() < 3 && !isEndOfValue && pos > 0) { // If this is a prefix or suffix of length < 3, then we already have a longer token including the anchor. 
terms.add(currentSequence); @@ -542,19 +561,42 @@ private static int findNonWildcardSequence(String value, int startFrom) { return value.length(); } - private static String performEscape(String str) { - StringBuilder sb = new StringBuilder(); + /** + * reversed process of quoteWildcard + * @param str target string + * @param regexpMode whether is used for regexp escape + * @return string before escaped + */ + private static String performEscape(String str, boolean regexpMode) { + final StringBuilder sb = new StringBuilder(); + final Set targetChars = regexpMode ? REGEXP_SPECIAL : WILDCARD_SPECIAL; + for (int i = 0; i < str.length(); i++) { if (str.charAt(i) == '\\' && (i + 1) < str.length()) { char c = str.charAt(i + 1); - if (c == '*' || c == '?') { + if (targetChars.contains(c)) { i++; } } sb.append(str.charAt(i)); } - assert !sb.toString().contains("\\*"); - assert !sb.toString().contains("\\?"); + return sb.toString(); + } + + /** + * manually escape instead of call String.replace for better performance + * only for term query + * @param str target string + * @return escaped string + */ + private static String quoteWildcard(String str) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < str.length(); i++) { + if (WILDCARD_SPECIAL.contains(str.charAt(i))) { + sb.append('\\'); + } + sb.append(str.charAt(i)); + } return sb.toString(); } @@ -568,11 +610,10 @@ public Query regexpQuery( QueryShardContext context ) { NamedAnalyzer normalizer = normalizer(); - if (normalizer != null) { - value = normalizer.normalize(name(), value).utf8ToString(); - } + final String finalValue = normalizer != null ? value = normalizer.normalize(name(), value).utf8ToString() : value; + final boolean caseInsensitive = matchFlags == RegExp.ASCII_CASE_INSENSITIVE; - RegExp regExp = new RegExp(value, syntaxFlags, matchFlags); + RegExp regExp = new RegExp(finalValue, syntaxFlags, matchFlags); Automaton automaton = regExp.toAutomaton(maxDeterminizedStates); CompiledAutomaton compiledAutomaton = new CompiledAutomaton(automaton); @@ -581,6 +622,14 @@ public Query regexpQuery( return existsQuery(context); } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.NONE) { return new MatchNoDocsQuery("Regular expression matches nothing"); + } else if (compiledAutomaton.type == CompiledAutomaton.AUTOMATON_TYPE.SINGLE) { + // when type equals SINGLE, #compiledAutomaton.runAutomaton is null + regexpPredicate = s -> { + if (caseInsensitive) { + s = s.toLowerCase(Locale.ROOT); + } + return s.equals(performEscape(finalValue, true)); + }; } else { regexpPredicate = s -> { BytesRef valueBytes = BytesRefs.toBytesRef(s); @@ -588,11 +637,11 @@ public Query regexpQuery( }; } - Query approximation = regexpToQuery(name(), regExp); + Query approximation = regexpToQuery(name(), regExp, caseInsensitive); if (approximation instanceof MatchAllDocsQuery) { approximation = existsQuery(context); } - return new WildcardMatchingQuery(name(), approximation, regexpPredicate, "/" + value + "/", context, this); + return new WildcardMatchingQuery(name(), approximation, regexpPredicate, "/" + finalValue + "/", context, this); } /** @@ -602,16 +651,16 @@ public Query regexpQuery( * @param regExp a parsed node in the {@link RegExp} tree * @return a query that matches on the known required parts of the given regular expression */ - private static Query regexpToQuery(String fieldName, RegExp regExp) { + private static Query regexpToQuery(String fieldName, RegExp regExp, boolean caseInsensitive) { BooleanQuery query; if 
(Objects.requireNonNull(regExp.kind) == RegExp.Kind.REGEXP_UNION) { List clauses = new ArrayList<>(); while (regExp.exp1.kind == RegExp.Kind.REGEXP_UNION) { - clauses.add(regexpToQuery(fieldName, regExp.exp2)); + clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive)); regExp = regExp.exp1; } - clauses.add(regexpToQuery(fieldName, regExp.exp2)); - clauses.add(regexpToQuery(fieldName, regExp.exp1)); + clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive)); + clauses.add(regexpToQuery(fieldName, regExp.exp1, caseInsensitive)); BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int i = clauses.size() - 1; i >= 0; i--) { Query clause = clauses.get(i); @@ -623,18 +672,24 @@ private static Query regexpToQuery(String fieldName, RegExp regExp) { query = builder.build(); } else if (regExp.kind == RegExp.Kind.REGEXP_STRING) { BooleanQuery.Builder builder = new BooleanQuery.Builder(); - for (String string : getRequiredNGrams("*" + regExp.s + "*")) { - builder.add(new TermQuery(new Term(fieldName, string)), BooleanClause.Occur.FILTER); + for (String string : getRequiredNGrams("*" + regExp.s + "*", true)) { + final Query subQuery; + if (caseInsensitive) { + subQuery = AutomatonQueries.caseInsensitiveTermQuery(new Term(fieldName, string)); + } else { + subQuery = new TermQuery(new Term(fieldName, string)); + } + builder.add(subQuery, BooleanClause.Occur.FILTER); } query = builder.build(); } else if (regExp.kind == RegExp.Kind.REGEXP_CONCATENATION) { List clauses = new ArrayList<>(); while (regExp.exp1.kind == RegExp.Kind.REGEXP_CONCATENATION) { - clauses.add(regexpToQuery(fieldName, regExp.exp2)); + clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive)); regExp = regExp.exp1; } - clauses.add(regexpToQuery(fieldName, regExp.exp2)); - clauses.add(regexpToQuery(fieldName, regExp.exp1)); + clauses.add(regexpToQuery(fieldName, regExp.exp2, caseInsensitive)); + clauses.add(regexpToQuery(fieldName, regExp.exp1, caseInsensitive)); BooleanQuery.Builder builder = new BooleanQuery.Builder(); for (int i = clauses.size() - 1; i >= 0; i--) { Query clause = clauses.get(i); @@ -645,7 +700,7 @@ private static Query regexpToQuery(String fieldName, RegExp regExp) { query = builder.build(); } else if ((regExp.kind == RegExp.Kind.REGEXP_REPEAT_MIN || regExp.kind == RegExp.Kind.REGEXP_REPEAT_MINMAX) && regExp.min > 0) { - return regexpToQuery(fieldName, regExp.exp1); + return regexpToQuery(fieldName, regExp.exp1, caseInsensitive); } else { return new MatchAllDocsQuery(); } @@ -664,12 +719,12 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower @Override public Query termQueryCaseInsensitive(Object value, QueryShardContext context) { - return wildcardQuery(BytesRefs.toString(value), MultiTermQuery.CONSTANT_SCORE_REWRITE, true, context); + return wildcardQuery(quoteWildcard(BytesRefs.toString(value)), MultiTermQuery.CONSTANT_SCORE_REWRITE, true, context); } @Override public Query termQuery(Object value, QueryShardContext context) { - return wildcardQuery(BytesRefs.toString(value), MultiTermQuery.CONSTANT_SCORE_REWRITE, false, context); + return wildcardQuery(quoteWildcard(BytesRefs.toString(value)), MultiTermQuery.CONSTANT_SCORE_REWRITE, false, context); } @Override @@ -679,7 +734,10 @@ public Query termsQuery(List values, QueryShardContext context) { StringBuilder pattern = new StringBuilder(); for (Object value : values) { String stringVal = BytesRefs.toString(value); - builder.add(matchAllTermsQuery(name(), getRequiredNGrams(stringVal), 
false), BooleanClause.Occur.SHOULD); + builder.add( + matchAllTermsQuery(name(), getRequiredNGrams(quoteWildcard(stringVal), false), false), + BooleanClause.Occur.SHOULD + ); expectedValues.add(stringVal); if (pattern.length() > 0) { pattern.append('|'); From 6dc63c5281f631c8a1921bbd60f2bf975a82e0a5 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 10 Jan 2025 17:37:40 -0500 Subject: [PATCH 22/37] Bump opentelemetry from 1.41.0 to 1.46.0 and opentelemetry-semconv from 1.27.0-alpha to 1.29.0-alpha (#17000) Signed-off-by: Andriy Redko --- CHANGELOG.md | 2 ++ gradle/libs.versions.toml | 4 ++-- plugins/telemetry-otel/build.gradle | 1 + .../telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 | 1 + .../opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 | 1 - .../opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 | 1 + .../telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 | 1 - .../telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 | 1 - .../licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 | 1 + 31 files changed, 19 insertions(+), 16 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 create mode 100644 
plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 20e6c03d5a9d6..e20fda7bfdb18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) - Bump `ch.qos.logback:logback-core` from 1.5.12 to 1.5.16 ([#16951](https://github.com/opensearch-project/OpenSearch/pull/16951)) - Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952)) +- Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700)) +- Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index f357fb248520c..1cd2f8d87e1d4 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -78,8 +78,8 @@ jzlib = "1.1.3" resteasy = "6.2.4.Final" # opentelemetry dependencies -opentelemetry = "1.41.0" -opentelemetrysemconv = "1.27.0-alpha" +opentelemetry = "1.46.0" +opentelemetrysemconv = "1.29.0-alpha" # arrow dependencies arrow = "17.0.0" diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 3aba7d64cd96d..54f4f2f897562 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -88,6 +88,7 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider', 'io.opentelemetry.sdk.autoconfigure.spi.internal.AutoConfigureListener', 'io.opentelemetry.sdk.autoconfigure.spi.internal.ComponentProvider', + 
'io.opentelemetry.sdk.autoconfigure.spi.internal.DefaultConfigProperties', 'io.opentelemetry.sdk.autoconfigure.spi.internal.StructuredConfigProperties' ) } diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 deleted file mode 100644 index ead8fb235fa12..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec5ad3b420c9fba4b340e85a3199fd0f2accd023 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..b2d1d3575fcde --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.46.0.jar.sha1 @@ -0,0 +1 @@ +afd2d5781454088400cceabbe84f7a9b29d27161 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 deleted file mode 100644 index b601a4fb5246f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.41.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fd387313cc37a6e93062e9a80a2526634d22cb19 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..e89de4cb29f16 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.46.0-alpha.jar.sha1 @@ -0,0 +1 @@ +1a708444d2818ac1a47767a2b35d74ef55d26af8 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 deleted file mode 100644 index 74b7cb25cdfe5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3d7cf15ef425053e24e825160ca7b4ac08d721aa \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..df658f4c87ac2 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.46.0.jar.sha1 @@ -0,0 +1 @@ +8cee1fa7ec9129f7b252595c612c19f4570d567f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 deleted file mode 100644 index d8d8f75850cb6..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cf92f4c1b60c2359c12f6f323f6a2a623c333910 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..e6503871bff53 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +2e2d8f3b51b1a2b1184f11d9059e129c5e39147a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 deleted file mode 100644 index 
3e1212943f894..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dee21440b811004ecc1c36c1cd44f9d3494546c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..65757fff8b0e7 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.46.0.jar.sha1 @@ -0,0 +1 @@ +a0ef76a383a086b812395ca5a5cdf94804a59a3f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 deleted file mode 100644 index 21a29cc8445e5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d86e60b6d49e389ebe5797d42a7288a20d30c162 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..0fc550e83748e --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1122a5ea0562147547ddf0eb28e1035d549c0ea0 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 deleted file mode 100644 index ae522ac698aa8..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeba3075b8dfd97779edadc0a3711d999bb0e396 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..a01f85d9e1258 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +abeb93b8b6d2cb0007b1d6122325f94a11e61ca4 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 deleted file mode 100644 index a741d0a167d60..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -368d7905d6a0a313c63e3a91f895a3a08500519e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..8c755281bab05 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.46.0.jar.sha1 @@ -0,0 +1 @@ +32a0fe0fa7cd9831b502075f27c1fe6d28280cdb \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 deleted file mode 100644 index 972e7de1c74be..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c740e8f7d0d914d6acd310ac53901bb8753c6e8d \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..a41c756db7096 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.46.0.jar.sha1 @@ -0,0 +1 @@ +b3a77fff1084177c4f5099bbb7db6181d6efd752 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 deleted file mode 100644 index c56ca0b9e8169..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b820861f85ba83db0ad896c47f723208d7473d5a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..1bd211a143c03 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1d353ee4e980ff77c742350fc7000b732b6c6b3f \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 deleted file mode 100644 index 39db6cb73727f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f88ee292f5605c87dfe85c8d90131bce9f0b3b8e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..084a703a4d4cc --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.46.0.jar.sha1 @@ -0,0 +1 @@ +1bd9bb4f3ce9ac573613b353a78d51491cd02bbd \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 deleted file mode 100644 index 6dcd496e033d3..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d1200befb28e3e9f61073ac3de23cc55e509dc7 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..1fe3c4842d41d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.46.0.jar.sha1 @@ -0,0 +1 @@ +475d900ffd0567a7ddf2452290b2e5d51ac35c58 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 deleted file mode 100644 index 161e400f87077..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.41.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d9bbc2e2e800317d72fbf3141ae8391e95fa6229 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 new file mode 100644 index 0000000000000..da00b35812afb --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.46.0.jar.sha1 @@ -0,0 +1 @@ +c6e39faabf0741780189861156d0a7763e942796 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 deleted file mode 100644 index e986b4b53388e..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.27.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -906d916bee46f60260c09314284b5948c54a0662 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..3326c366cb4c9 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.29.0-alpha.jar.sha1 @@ -0,0 +1 @@ +613d7f7743eb2b974680ad1af1685802e6a7cb58 \ No newline at end of file From fccd6c54c14dabc46483f1b6ec3f3b02d08edfdd Mon Sep 17 00:00:00 2001 From: kkewwei Date: Sat, 11 Jan 2025 06:45:52 +0800 Subject: [PATCH 23/37] TransportBulkAction.doRun() (#16950) Signed-off-by: kkewwei Signed-off-by: kkewwei --- .../action/bulk/TransportBulkAction.java | 23 +++++++------------ 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java index 19ffb12859183..db509afb68da9 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportBulkAction.java @@ -532,6 +532,8 @@ protected void doRun() { } final ConcreteIndices concreteIndices = new ConcreteIndices(clusterState, indexNameExpressionResolver); Metadata metadata = clusterState.metadata(); + // go over all the requests and create a ShardId -> Operations mapping + Map> requestsByShard = new HashMap<>(); for (int i = 0; i < bulkRequest.requests.size(); i++) { DocWriteRequest docWriteRequest = bulkRequest.requests.get(i); // the request can only be null because we set it to null in the previous step, so it gets ignored @@ -587,6 +589,12 @@ protected void doRun() { default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); } + + ShardId shardId = clusterService.operationRouting() + .indexShards(clusterState, concreteIndex.getName(), docWriteRequest.id(), docWriteRequest.routing()) + .shardId(); + List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); + shardRequests.add(new BulkItemRequest(i, docWriteRequest)); } catch (OpenSearchParseException | IllegalArgumentException | RoutingMissingException e) { BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); @@ -596,21 +604,6 @@ protected void doRun() { } } - // first, go over all the requests and create a ShardId -> Operations mapping - Map> requestsByShard = new HashMap<>(); - for (int i = 0; i < bulkRequest.requests.size(); i++) { - DocWriteRequest request = bulkRequest.requests.get(i); - if (request == null) { - continue; - } - String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); - ShardId shardId = clusterService.operationRouting() - .indexShards(clusterState, concreteIndex, request.id(), request.routing()) - .shardId(); - List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); - shardRequests.add(new BulkItemRequest(i, request)); - } - if 
(requestsByShard.isEmpty()) { BulkItemResponse[] response = responses.toArray(new BulkItemResponse[responses.length()]); long tookMillis = buildTookInMillis(startTimeNanos); From 8d5e1a3972ac34d769fff6618d26f9f9e36b06b7 Mon Sep 17 00:00:00 2001 From: Ivan Brusic Date: Fri, 10 Jan 2025 22:16:34 -0800 Subject: [PATCH 24/37] Show only intersecting buckets to the Adjacency matrix aggregation (#11733) Signed-off-by: Ivan Brusic --- .../70_adjacency_matrix.yml | 37 +++++++++ .../AdjacencyMatrixAggregationBuilder.java | 82 +++++++++++++++++-- .../adjacency/AdjacencyMatrixAggregator.java | 19 +++-- .../AdjacencyMatrixAggregatorFactory.java | 16 +++- ...djacencyMatrixAggregationBuilderTests.java | 21 ++++- .../metrics/AdjacencyMatrixTests.java | 18 ++++ 6 files changed, 177 insertions(+), 16 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml index f8fa537ed91bf..ccd194eff6f51 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml @@ -125,3 +125,40 @@ setup: - match: { aggregations.conns.buckets.3.doc_count: 1 } - match: { aggregations.conns.buckets.3.key: "4" } + + +--- +"Show only intersections": + - skip: + version: " - 2.99.99" + reason: "show_only_intersecting was added in 3.0.0" + features: node_selector + - do: + node_selector: + version: "3.0.0 - " + search: + index: test + rest_total_hits_as_int: true + body: + size: 0 + aggs: + conns: + adjacency_matrix: + show_only_intersecting: true + filters: + 1: + term: + num: 1 + 2: + term: + num: 2 + 4: + term: + num: 4 + + - match: { hits.total: 3 } + + - length: { aggregations.conns.buckets: 1 } + + - match: { aggregations.conns.buckets.0.doc_count: 1 } + - match: { aggregations.conns.buckets.0.key: "1&2" } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 743d0023364fa..1b6a7e1158b83 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -32,6 +32,7 @@ package org.opensearch.search.aggregations.bucket.adjacency; +import org.opensearch.Version; import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -71,7 +72,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde private static final ParseField SEPARATOR_FIELD = new ParseField("separator"); private static final ParseField FILTERS_FIELD = new ParseField("filters"); + private static final ParseField SHOW_ONLY_INTERSECTING = new ParseField("show_only_intersecting"); + private List filters; + private boolean showOnlyIntersecting = false; private String separator = DEFAULT_SEPARATOR; private static final ObjectParser PARSER = ObjectParser.fromBuilder( @@ -81,6 +85,10 @@ public class AdjacencyMatrixAggregationBuilder extends AbstractAggregationBuilde static { PARSER.declareString(AdjacencyMatrixAggregationBuilder::separator, SEPARATOR_FIELD); 
PARSER.declareNamedObjects(AdjacencyMatrixAggregationBuilder::setFiltersAsList, KeyedFilter.PARSER, FILTERS_FIELD); + PARSER.declareBoolean( + AdjacencyMatrixAggregationBuilder::setShowOnlyIntersecting, + AdjacencyMatrixAggregationBuilder.SHOW_ONLY_INTERSECTING + ); } public static AggregationBuilder parse(XContentParser parser, String name) throws IOException { @@ -115,6 +123,7 @@ protected AdjacencyMatrixAggregationBuilder( super(clone, factoriesBuilder, metadata); this.filters = new ArrayList<>(clone.filters); this.separator = clone.separator; + this.showOnlyIntersecting = clone.showOnlyIntersecting; } @Override @@ -138,6 +147,40 @@ public AdjacencyMatrixAggregationBuilder(String name, String separator, Map filters, boolean showOnlyIntersecting) { + this(name, DEFAULT_SEPARATOR, filters, showOnlyIntersecting); + } + + /** + * @param name + * the name of this aggregation + * @param separator + * the string used to separate keys in intersections buckets e.g. + * & character for keyed filters A and B would return an + * intersection bucket named A&B + * @param filters + * the filters and their key to use with this aggregation. + * @param showOnlyIntersecting + * show only the buckets that intersection multiple documents + */ + public AdjacencyMatrixAggregationBuilder( + String name, + String separator, + Map filters, + boolean showOnlyIntersecting + ) { + this(name, separator, filters); + this.showOnlyIntersecting = showOnlyIntersecting; + } + /** * Read from a stream. */ @@ -145,6 +188,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { super(in); int filtersSize = in.readVInt(); separator = in.readString(); + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + showOnlyIntersecting = in.readBoolean(); + } filters = new ArrayList<>(filtersSize); for (int i = 0; i < filtersSize; i++) { filters.add(new KeyedFilter(in)); @@ -155,6 +201,9 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(filters.size()); out.writeString(separator); + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(showOnlyIntersecting); + } for (KeyedFilter keyedFilter : filters) { keyedFilter.writeTo(out); } @@ -185,6 +234,11 @@ private AdjacencyMatrixAggregationBuilder setFiltersAsList(List fil return this; } + public AdjacencyMatrixAggregationBuilder setShowOnlyIntersecting(boolean showOnlyIntersecting) { + this.showOnlyIntersecting = showOnlyIntersecting; + return this; + } + /** * Set the separator used to join pairs of bucket keys */ @@ -214,6 +268,10 @@ public Map filters() { return result; } + public boolean isShowOnlyIntersecting() { + return showOnlyIntersecting; + } + @Override protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException { boolean modified = false; @@ -224,7 +282,9 @@ protected AdjacencyMatrixAggregationBuilder doRewrite(QueryRewriteContext queryS rewrittenFilters.add(new KeyedFilter(kf.key(), rewritten)); } if (modified) { - return new AdjacencyMatrixAggregationBuilder(name).separator(separator).setFiltersAsList(rewrittenFilters); + return new AdjacencyMatrixAggregationBuilder(name).separator(separator) + .setFiltersAsList(rewrittenFilters) + .setShowOnlyIntersecting(showOnlyIntersecting); } return this; } @@ -245,7 +305,16 @@ protected AggregatorFactory doBuild(QueryShardContext queryShardContext, Aggrega + "] index level setting." 
); } - return new AdjacencyMatrixAggregatorFactory(name, filters, separator, queryShardContext, parent, subFactoriesBuilder, metadata); + return new AdjacencyMatrixAggregatorFactory( + name, + filters, + showOnlyIntersecting, + separator, + queryShardContext, + parent, + subFactoriesBuilder, + metadata + ); } @Override @@ -257,7 +326,8 @@ public BucketCardinality bucketCardinality() { protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(SEPARATOR_FIELD.getPreferredName(), separator); - builder.startObject(AdjacencyMatrixAggregator.FILTERS_FIELD.getPreferredName()); + builder.field(SHOW_ONLY_INTERSECTING.getPreferredName(), showOnlyIntersecting); + builder.startObject(FILTERS_FIELD.getPreferredName()); for (KeyedFilter keyedFilter : filters) { builder.field(keyedFilter.key(), keyedFilter.filter()); } @@ -268,7 +338,7 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param @Override public int hashCode() { - return Objects.hash(super.hashCode(), filters, separator); + return Objects.hash(super.hashCode(), filters, showOnlyIntersecting, separator); } @Override @@ -277,7 +347,9 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; AdjacencyMatrixAggregationBuilder other = (AdjacencyMatrixAggregationBuilder) obj; - return Objects.equals(filters, other.filters) && Objects.equals(separator, other.separator); + return Objects.equals(filters, other.filters) + && Objects.equals(separator, other.separator) + && Objects.equals(showOnlyIntersecting, other.showOnlyIntersecting); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java index ef1795f425240..f82ee9dc242fb 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregator.java @@ -36,7 +36,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.opensearch.common.lucene.Lucene; -import org.opensearch.core.ParseField; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; @@ -70,8 +69,6 @@ */ public class AdjacencyMatrixAggregator extends BucketsAggregator { - public static final ParseField FILTERS_FIELD = new ParseField("filters"); - /** * A keyed filter * @@ -145,6 +142,8 @@ public boolean equals(Object obj) { private final String[] keys; private final Weight[] filters; + + private final boolean showOnlyIntersecting; private final int totalNumKeys; private final int totalNumIntersections; private final String separator; @@ -155,6 +154,7 @@ public AdjacencyMatrixAggregator( String separator, String[] keys, Weight[] filters, + boolean showOnlyIntersecting, SearchContext context, Aggregator parent, Map metadata @@ -163,6 +163,7 @@ public AdjacencyMatrixAggregator( this.separator = separator; this.keys = keys; this.filters = filters; + this.showOnlyIntersecting = showOnlyIntersecting; this.totalNumIntersections = ((keys.length * keys.length) - keys.length) / 2; this.totalNumKeys = keys.length + totalNumIntersections; } @@ -177,10 +178,12 @@ public 
LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBuc return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { - // Check each of the provided filters - for (int i = 0; i < bits.length; i++) { - if (bits[i].get(doc)) { - collectBucket(sub, doc, bucketOrd(bucket, i)); + if (!showOnlyIntersecting) { + // Check each of the provided filters + for (int i = 0; i < bits.length; i++) { + if (bits[i].get(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, i)); + } } } // Check all the possible intersections of the provided filters @@ -229,7 +232,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I for (int i = 0; i < keys.length; i++) { long bucketOrd = bucketOrd(owningBucketOrds[owningBucketOrdIdx], i); long docCount = bucketDocCount(bucketOrd); - // Empty buckets are not returned because this aggregation will commonly be used under a + // Empty buckets are not returned because this aggregation will commonly be used under // a date-histogram where we will look for transactions over time and can expect many // empty buckets. if (docCount > 0) { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java index 99ffb563ba2a8..bae86f3fcdfc1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregatorFactory.java @@ -57,11 +57,14 @@ public class AdjacencyMatrixAggregatorFactory extends AggregatorFactory { private final String[] keys; private final Weight[] weights; + + private final boolean showOnlyIntersecting; private final String separator; public AdjacencyMatrixAggregatorFactory( String name, List filters, + boolean showOnlyIntersecting, String separator, QueryShardContext queryShardContext, AggregatorFactory parent, @@ -79,6 +82,7 @@ public AdjacencyMatrixAggregatorFactory( Query filter = keyedFilter.filter().toQuery(queryShardContext); weights[i] = contextSearcher.createWeight(contextSearcher.rewrite(filter), ScoreMode.COMPLETE_NO_SCORES, 1f); } + this.showOnlyIntersecting = showOnlyIntersecting; } @Override @@ -88,7 +92,17 @@ public Aggregator createInternal( CardinalityUpperBound cardinality, Map metadata ) throws IOException { - return new AdjacencyMatrixAggregator(name, factories, separator, keys, weights, searchContext, parent, metadata); + return new AdjacencyMatrixAggregator( + name, + factories, + separator, + keys, + weights, + showOnlyIntersecting, + searchContext, + parent, + metadata + ); } @Override diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java index b2025ae5f03c1..e7c1de0123c9e 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilderTests.java @@ -57,7 +57,7 @@ public class AdjacencyMatrixAggregationBuilderTests extends OpenSearchTestCase { public void testFilterSizeLimitation() throws Exception { - // filter size grater than max size should thrown 
a exception + // filter size grater than max size should throw an exception QueryShardContext queryShardContext = mock(QueryShardContext.class); IndexShard indexShard = mock(IndexShard.class); Settings settings = Settings.builder() @@ -94,7 +94,7 @@ public void testFilterSizeLimitation() throws Exception { ) ); - // filter size not grater than max size should return an instance of AdjacencyMatrixAggregatorFactory + // filter size not greater than max size should return an instance of AdjacencyMatrixAggregatorFactory Map emptyFilters = Collections.emptyMap(); AdjacencyMatrixAggregationBuilder aggregationBuilder = new AdjacencyMatrixAggregationBuilder("dummy", emptyFilters); @@ -106,4 +106,21 @@ public void testFilterSizeLimitation() throws Exception { + "removed in a future release! See the breaking changes documentation for the next major version." ); } + + public void testShowOnlyIntersecting() throws Exception { + QueryShardContext queryShardContext = mock(QueryShardContext.class); + + Map filters = new HashMap<>(3); + for (int i = 0; i < 2; i++) { + QueryBuilder queryBuilder = mock(QueryBuilder.class); + // return builder itself to skip rewrite + when(queryBuilder.rewrite(queryShardContext)).thenReturn(queryBuilder); + filters.put("filter" + i, queryBuilder); + } + AdjacencyMatrixAggregationBuilder builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, true); + assertTrue(builder.isShowOnlyIntersecting()); + + builder = new AdjacencyMatrixAggregationBuilder("dummy", filters, false); + assertFalse(builder.isShowOnlyIntersecting()); + } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java index c5cf56f6caff7..38e53d65a69e6 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/metrics/AdjacencyMatrixTests.java @@ -68,4 +68,22 @@ public void testFiltersSameMap() { assertEquals(original, builder.filters()); assert original != builder.filters(); } + + public void testShowOnlyIntersecting() { + Map original = new HashMap<>(); + original.put("bbb", new MatchNoneQueryBuilder()); + original.put("aaa", new MatchNoneQueryBuilder()); + AdjacencyMatrixAggregationBuilder builder; + builder = new AdjacencyMatrixAggregationBuilder("my-agg", "&", original, true); + assertTrue(builder.isShowOnlyIntersecting()); + } + + public void testShowOnlyIntersectingAsFalse() { + Map original = new HashMap<>(); + original.put("bbb", new MatchNoneQueryBuilder()); + original.put("aaa", new MatchNoneQueryBuilder()); + AdjacencyMatrixAggregationBuilder builder; + builder = new AdjacencyMatrixAggregationBuilder("my-agg", original, false); + assertFalse(builder.isShowOnlyIntersecting()); + } } From 26465e87ed60acc8fd7c65e3559b14e8ccd59fbf Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 10:41:36 -0500 Subject: [PATCH 25/37] Bump com.google.re2j:re2j from 1.7 to 1.8 in /plugins/repository-hdfs (#17012) * Bump com.google.re2j:re2j from 1.7 to 1.8 in /plugins/repository-hdfs Bumps [com.google.re2j:re2j](https://github.com/google/re2j) from 1.7 to 1.8. 
- [Release notes](https://github.com/google/re2j/releases) - [Commits](https://github.com/google/re2j/compare/re2j-1.7...re2j-1.8) --- updated-dependencies: - dependency-name: com.google.re2j:re2j dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 1 + plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 | 1 - plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index e20fda7bfdb18..5b07e527ac712 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -63,6 +63,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-core-http-netty` from 1.15.5 to 1.15.7 ([#16952](https://github.com/opensearch-project/OpenSearch/pull/16952)) - Bump `opentelemetry` from 1.41.0 to 1.46.0 ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700)) - Bump `opentelemetry-semconv` from 1.27.0-alpha to 1.29.0-alpha ([#16700](https://github.com/opensearch-project/OpenSearch/pull/16700)) +- Bump `com.google.re2j:re2j` from 1.7 to 1.8 ([#17012](https://github.com/opensearch-project/OpenSearch/pull/17012)) ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 441c6ae998406..c2685a525c8ba 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -77,7 +77,7 @@ dependencies { api 'org.apache.commons:commons-configuration2:2.11.0' api "commons-io:commons-io:${versions.commonsio}" api 'org.apache.commons:commons-lang3:3.17.0' - implementation 'com.google.re2j:re2j:1.7' + implementation 'com.google.re2j:re2j:1.8' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" api "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" diff --git a/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 deleted file mode 100644 index eb858e3677e30..0000000000000 --- a/plugins/repository-hdfs/licenses/re2j-1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2949632c1b4acce0d7784f28e3152e9cf3c2ec7a \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 new file mode 100644 index 0000000000000..8887078965f56 --- /dev/null +++ b/plugins/repository-hdfs/licenses/re2j-1.8.jar.sha1 @@ -0,0 +1 @@ +12c25e923e9e4fb1575a7640a2698745c6f19a94 \ No newline at end of file From f98f4267c35ec2d5567f189944dda5671425e46e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:09:34 -0500 Subject: [PATCH 26/37] Bump com.nimbusds:oauth2-oidc-sdk from 11.20.1 to 11.21 in /plugins/repository-azure (#17010) * Bump com.nimbusds:oauth2-oidc-sdk in /plugins/repository-azure Bumps 
[com.nimbusds:oauth2-oidc-sdk](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions) from 11.20.1 to 11.21. - [Changelog](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/oauth-2.0-sdk-with-openid-connect-extensions/branches/compare/11.21..11.20.1) --- updated-dependencies: - dependency-name: com.nimbusds:oauth2-oidc-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- plugins/repository-azure/build.gradle | 2 +- .../repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 | 1 - .../repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 | 1 + 4 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b07e527ac712..f1866ea07a352 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -55,7 +55,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.gradle.develocity` from 3.18.2 to 3.19 ([#16855](https://github.com/opensearch-project/OpenSearch/pull/16855)) - Bump `org.jline:jline` from 3.27.1 to 3.28.0 ([#16857](https://github.com/opensearch-project/OpenSearch/pull/16857)) - Bump `com.azure:azure-core` from 1.51.0 to 1.54.1 ([#16856](https://github.com/opensearch-project/OpenSearch/pull/16856)) -- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.20.1 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895)) +- Bump `com.nimbusds:oauth2-oidc-sdk` from 11.19.1 to 11.21 ([#16895](https://github.com/opensearch-project/OpenSearch/pull/16895), [#17010](https://github.com/opensearch-project/OpenSearch/pull/17010)) - Bump `com.netflix.nebula.ospackage-base` from 11.10.0 to 11.10.1 ([#16896](https://github.com/opensearch-project/OpenSearch/pull/16896)) - Bump `com.microsoft.azure:msal4j` from 1.17.2 to 1.18.0 ([#16918](https://github.com/opensearch-project/OpenSearch/pull/16918)) - Bump `org.apache.commons:commons-text` from 1.12.0 to 1.13.0 ([#16919](https://github.com/opensearch-project/OpenSearch/pull/16919)) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index c6b303f22112e..332651e37cfa4 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -62,7 +62,7 @@ dependencies { api 'com.microsoft.azure:msal4j-persistence-extension:1.3.0' api "net.java.dev.jna:jna-platform:${versions.jna}" api 'com.microsoft.azure:msal4j:1.18.0' - api 'com.nimbusds:oauth2-oidc-sdk:11.20.1' + api 'com.nimbusds:oauth2-oidc-sdk:11.21' api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' diff --git a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 deleted file mode 100644 index 7527d31eb1d37..0000000000000 --- a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.20.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d1ecd62d31945534a7cd63062c3c48ff0df9c43 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 new file mode 100644 index 0000000000000..9736182141a0a --- /dev/null +++ b/plugins/repository-azure/licenses/oauth2-oidc-sdk-11.21.jar.sha1 @@ -0,0 +1 @@ +97bec173d2a199fdd7f5c1f3a61f7ccc2e992fc1 \ No newline at end of file From a609e634a348b76386fb11936bbe8c4b38ea72d0 Mon Sep 17 00:00:00 2001 From: Ralph Ursprung <39383228+rursprung@users.noreply.github.com> Date: Mon, 13 Jan 2025 22:12:13 +0100 Subject: [PATCH 27/37] improve `PhoneNumberAnalyzerTests#testTelPrefixSearch` (#17016) this way we ensure that it doesn't include any additional tokens which we don't want. this is a follow-up to commit 4d943993ac9 / #16993. Signed-off-by: Ralph Ursprung --- .../opensearch/analysis/phone/PhoneNumberAnalyzerTests.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java index d55c0b2ce7d2a..503cee9cc710f 100644 --- a/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java +++ b/plugins/analysis-phonenumber/src/test/java/org/opensearch/analysis/phone/PhoneNumberAnalyzerTests.java @@ -159,11 +159,11 @@ public void testSipWithoutDomainPart() throws IOException { } public void testTelPrefix() throws IOException { - assertTokensInclude("tel:+1228", Arrays.asList("1228", "122", "228")); + assertTokensInclude(phoneAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "tel:", "1228", "122", "228")); } public void testTelPrefixSearch() throws IOException { - assertTokensInclude("tel:+1228", Arrays.asList("1228")); + assertTokensAreInAnyOrder(phoneSearchAnalyzer, "tel:+1228", Arrays.asList("tel:+1228", "1228")); } public void testNumberPrefix() throws IOException { From f9c239d340423099699be52aa1594ef37e35005f Mon Sep 17 00:00:00 2001 From: Michael Froh Date: Tue, 14 Jan 2025 15:53:51 -0800 Subject: [PATCH 28/37] Filter shards for sliced search at coordinator (#16771) * Filter shards for sliced search at coordinator Prior to this commit, a sliced search would fan out to every shard, then apply a MatchNoDocsQuery filter on shards that don't correspond to the current slice. This still creates a (useless) search context on each shard for every slice, though. For a long-running sliced scroll, this can quickly exhaust the number of available scroll contexts. This change avoids fanning out to all the shards by checking at the coordinator if a shard is matched by the current slice. This should reduce the number of open scroll contexts to max(numShards, numSlices) instead of numShards * numSlices. 
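A minimal, self-contained sketch of that slice-to-shard assignment is shown below. The class and method names in the sketch are invented for illustration only; in this change the real check is SliceBuilder#shardMatches, consulted from OperationRouting#searchShards in the diff that follows.

    // Illustrative sketch only: mirrors the arithmetic of SliceBuilder#shardMatches,
    // but is not part of the OpenSearch code base.
    import java.util.ArrayList;
    import java.util.List;

    public class SliceRoutingSketch {

        /** True if slice (sliceId of maxSlices) should search the shard at shardOrdinal. */
        static boolean shardMatches(int sliceId, int maxSlices, int shardOrdinal, int numShards) {
            if (maxSlices >= numShards) {
                // More slices than shards: slices are distributed over shards,
                // so each slice maps to exactly one shard ordinal.
                return sliceId % numShards == shardOrdinal;
            }
            // More shards than slices: shards are distributed over slices.
            return shardOrdinal % maxSlices == sliceId;
        }

        public static void main(String[] args) {
            int numShards = 7;   // as in the new 20_slice.yml test index
            int maxSlices = 3;
            for (int sliceId = 0; sliceId < maxSlices; sliceId++) {
                List<Integer> targeted = new ArrayList<>();
                for (int shard = 0; shard < numShards; shard++) {
                    if (shardMatches(sliceId, maxSlices, shard, numShards)) {
                        targeted.add(shard);
                    }
                }
                // The coordinator only fans out to these shards instead of all numShards.
                System.out.println("slice " + sliceId + "/" + maxSlices + " -> shards " + targeted);
            }
        }
    }

With 7 shards and 3 slices this prints slice 0 -> shards [0, 3, 6], slice 1 -> [1, 4] and slice 2 -> [2, 5], which is the same fan-out the new rest-api-spec test search_shards/20_slice.yml below asserts.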
--------- Signed-off-by: Michael Froh --- CHANGELOG.md | 1 + .../rest-api-spec/api/search_shards.json | 3 + .../test/search_shards/20_slice.yml | 88 +++++++++++++++++++ .../shards/ClusterSearchShardsRequest.java | 28 +++++- .../TransportClusterSearchShardsAction.java | 2 +- ...TransportFieldCapabilitiesIndexAction.java | 3 +- .../action/search/TransportSearchAction.java | 11 ++- .../cluster/routing/OperationRouting.java | 39 ++++++-- .../RestClusterSearchShardsAction.java | 8 ++ .../opensearch/search/slice/SliceBuilder.java | 39 ++++---- .../search/TransportSearchActionTests.java | 5 ++ .../routing/OperationRoutingTests.java | 38 ++++---- 12 files changed, 219 insertions(+), 46 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml diff --git a/CHANGELOG.md b/CHANGELOG.md index f1866ea07a352..9fd5efdc986d1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Changed - Indexed IP field supports `terms_query` with more than 1025 IP masks [#16391](https://github.com/opensearch-project/OpenSearch/pull/16391) - Make entries for dependencies from server/build.gradle to gradle version catalog ([#16707](https://github.com/opensearch-project/OpenSearch/pull/16707)) +- Sliced search only fans out to shards matched by the selected slice, reducing open search contexts ([#16771](https://github.com/opensearch-project/OpenSearch/pull/16771)) - Allow extended plugins to be optional ([#16909](https://github.com/opensearch-project/OpenSearch/pull/16909)) - Use the correct type to widen the sort fields when merging top docs ([#16881](https://github.com/opensearch-project/OpenSearch/pull/16881)) - Limit reader writer separation to remote store enabled clusters [#16760](https://github.com/opensearch-project/OpenSearch/pull/16760) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json index 74b7055b4c4b0..9d3d420e8945c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_shards.json @@ -62,6 +62,9 @@ "default":"open", "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
} + }, + "body":{ + "description":"The search source (in order to specify slice parameters)" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml new file mode 100644 index 0000000000000..bf1a5429213df --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml @@ -0,0 +1,88 @@ +--- +"Search shards with slice specified in body": + - skip: + version: " - 2.99.99" + reason: "Added slice body to search_shards in 2.19" + - do: + indices.create: + index: test_index + body: + settings: + index: + number_of_shards: 7 + number_of_replicas: 0 + + - do: + search_shards: + index: test_index + body: + slice: + id: 0 + max: 3 + - length: { shards: 3 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 0 } + - match: { shards.1.0.shard: 3 } + - match: { shards.2.0.shard: 6 } + + - do: + search_shards: + index: test_index + body: + slice: + id: 1 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 1 } + - match: { shards.1.0.shard: 4 } + + - do: + search_shards: + index: test_index + body: + slice: + id: 2 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 2 } + - match: { shards.1.0.shard: 5 } + + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 0 + max: 3 + - length: { shards: 2 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 0 } + - match: { shards.1.0.shard: 6 } + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 1 + max: 3 + - length: { shards: 1 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 2 } + + - do: + search_shards: + index: test_index + preference: "_shards:0,2,4,6" + body: + slice: + id: 2 + max: 3 + - length: { shards: 1 } + - match: { shards.0.0.index: "test_index" } + - match: { shards.0.0.shard: 4 } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index 62e05ebb37e28..d4bf0efbd3eb5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -32,6 +32,7 @@ package org.opensearch.action.admin.cluster.shards; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; @@ -41,6 +42,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.search.slice.SliceBuilder; import java.io.IOException; import java.util.Objects; @@ -61,6 +63,8 @@ public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest nodeIds = new HashSet<>(); GroupShardsIterator groupShardsIterator = clusterService.operationRouting() - .searchShards(clusterState, concreteIndices, routingMap, request.preference()); + .searchShards(clusterState, concreteIndices, routingMap, request.preference(), null, null, request.slice()); ShardRouting shard; ClusterSearchShardsGroup[] 
groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 0; diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 10bf4975311d6..52937182e6a63 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -247,8 +247,7 @@ private AsyncShardsAction(FieldCapabilitiesIndexRequest request, ActionListener< throw blockException; } - shardsIt = clusterService.operationRouting() - .searchShards(clusterService.state(), new String[] { request.index() }, null, null, null, null); + shardsIt = clusterService.operationRouting().searchShards(clusterService.state(), new String[] { request.index() }, null, null); } public void start() { diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 8c4927afa9a14..dfec2e1fda738 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -85,6 +85,7 @@ import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.search.profile.ProfileShardResult; import org.opensearch.search.profile.SearchProfileShardResults; +import org.opensearch.search.slice.SliceBuilder; import org.opensearch.tasks.CancellableTask; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskResourceTrackingService; @@ -551,6 +552,7 @@ private ActionListener buildRewriteListener( ); } else { AtomicInteger skippedClusters = new AtomicInteger(0); + SliceBuilder slice = searchRequest.source() == null ? null : searchRequest.source().slice(); collectSearchShards( searchRequest.indicesOptions(), searchRequest.preference(), @@ -559,6 +561,7 @@ private ActionListener buildRewriteListener( remoteClusterIndices, remoteClusterService, threadPool, + slice, ActionListener.wrap(searchShardsResponses -> { final BiFunction clusterNodeLookup = getRemoteClusterNodeLookup( searchShardsResponses @@ -787,6 +790,7 @@ static void collectSearchShards( Map remoteIndicesByCluster, RemoteClusterService remoteClusterService, ThreadPool threadPool, + SliceBuilder slice, ActionListener> listener ) { final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); @@ -800,7 +804,8 @@ static void collectSearchShards( ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions(indicesOptions) .local(true) .preference(preference) - .routing(routing); + .routing(routing) + .slice(slice); clusterClient.admin() .cluster() .searchShards( @@ -1042,6 +1047,7 @@ private void executeSearch( concreteLocalIndices[i] = indices[i].getName(); } Map nodeSearchCounts = searchTransportService.getPendingSearchRequests(); + SliceBuilder slice = searchRequest.source() == null ? 
null : searchRequest.source().slice(); GroupShardsIterator localShardRoutings = clusterService.operationRouting() .searchShards( clusterState, @@ -1049,7 +1055,8 @@ private void executeSearch( routingMap, searchRequest.preference(), searchService.getResponseCollectorService(), - nodeSearchCounts + nodeSearchCounts, + slice ); localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false) .map(it -> new SearchShardIterator(searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices)) diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java index fe9e00b250e70..eac6f41acde4c 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java @@ -32,6 +32,7 @@ package org.opensearch.cluster.routing; +import org.apache.lucene.util.CollectionUtil; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; @@ -44,14 +45,17 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.common.Strings; +import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexNotFoundException; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.search.slice.SliceBuilder; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -230,7 +234,7 @@ public GroupShardsIterator searchShards( @Nullable Map> routing, @Nullable String preference ) { - return searchShards(clusterState, concreteIndices, routing, preference, null, null); + return searchShards(clusterState, concreteIndices, routing, preference, null, null, null); } public GroupShardsIterator searchShards( @@ -239,11 +243,14 @@ public GroupShardsIterator searchShards( @Nullable Map> routing, @Nullable String preference, @Nullable ResponseCollectorService collectorService, - @Nullable Map nodeCounts + @Nullable Map nodeCounts, + @Nullable SliceBuilder slice ) { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); - final Set set = new HashSet<>(shards.size()); + + Map> shardIterators = new HashMap<>(); for (IndexShardRoutingTable shard : shards) { + IndexMetadata indexMetadataForShard = indexMetadata(clusterState, shard.shardId.getIndex().getName()); if (indexMetadataForShard.isRemoteSnapshot() && (preference == null || preference.isEmpty())) { preference = Preference.PRIMARY.type(); @@ -274,10 +281,31 @@ public GroupShardsIterator searchShards( clusterState.metadata().weightedRoutingMetadata() ); if (iterator != null) { - set.add(iterator); + shardIterators.computeIfAbsent(iterator.shardId().getIndex(), k -> new ArrayList<>()).add(iterator); + } + } + List allShardIterators = new ArrayList<>(); + if (slice != null) { + for (List indexIterators : shardIterators.values()) { + // Filter the returned shards for the given slice + CollectionUtil.timSort(indexIterators); + // We use the ordinal of the iterator in the group (after sorting) rather than the shard id, because + // computeTargetedShards may return a subset of shards for an index, if a routing parameter was + // 
specified. In that case, the set of routable shards is considered the full universe of available + // shards for each index, when mapping shards to slices. If no routing parameter was specified, + // then ordinals and shard IDs are the same. This mimics the logic in + // org.opensearch.search.slice.SliceBuilder.toFilter. + for (int i = 0; i < indexIterators.size(); i++) { + if (slice.shardMatches(i, indexIterators.size())) { + allShardIterators.add(indexIterators.get(i)); + } + } } + } else { + shardIterators.values().forEach(allShardIterators::addAll); } - return GroupShardsIterator.sortAndCreate(new ArrayList<>(set)); + + return GroupShardsIterator.sortAndCreate(allShardIterators); } public static ShardIterator getShards(ClusterState clusterState, ShardId shardId) { @@ -311,6 +339,7 @@ private Set computeTargetedShards( set.add(indexShard); } } + } return set; } diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java index 3555576433104..304d1cabefd35 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestClusterSearchShardsAction.java @@ -40,6 +40,7 @@ import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.RestRequest; import org.opensearch.rest.action.RestToXContentListener; +import org.opensearch.search.builder.SearchSourceBuilder; import java.io.IOException; import java.util.List; @@ -81,6 +82,13 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC clusterSearchShardsRequest.routing(request.param("routing")); clusterSearchShardsRequest.preference(request.param("preference")); clusterSearchShardsRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterSearchShardsRequest.indicesOptions())); + if (request.hasContentOrSourceParam()) { + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); + sourceBuilder.parseXContent(request.contentOrSourceParamParser()); + if (sourceBuilder.slice() != null) { + clusterSearchShardsRequest.slice(sourceBuilder.slice()); + } + } return channel -> client.admin().cluster().searchShards(clusterSearchShardsRequest, new RestToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index c9b8a896ed525..691b829578e1f 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -214,6 +214,15 @@ public int hashCode() { return Objects.hash(this.field, this.id, this.max); } + public boolean shardMatches(int shardOrdinal, int numShards) { + if (max >= numShards) { + // Slices are distributed over shards + return id % numShards == shardOrdinal; + } + // Shards are distributed over slices + return shardOrdinal % max == id; + } + /** * Converts this QueryBuilder to a lucene {@link Query}. 
* @@ -225,7 +234,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, throw new IllegalArgumentException("field " + field + " not found"); } - int shardId = request.shardId().id(); + int shardOrdinal = request.shardId().id(); int numShards = context.getIndexSettings().getNumberOfShards(); if ((request.preference() != null || request.indexRoutings().length > 0)) { GroupShardsIterator group = buildShardIterator(clusterService, request); @@ -241,21 +250,26 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, */ numShards = group.size(); int ord = 0; - shardId = -1; + shardOrdinal = -1; // remap the original shard id with its index (position) in the sorted shard iterator. for (ShardIterator it : group) { assert it.shardId().getIndex().equals(request.shardId().getIndex()); if (request.shardId().equals(it.shardId())) { - shardId = ord; + shardOrdinal = ord; break; } ++ord; } - assert shardId != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; + assert shardOrdinal != -1 : "shard id: " + request.shardId().getId() + " not found in index shard routing"; } } - String field = this.field; + if (shardMatches(shardOrdinal, numShards) == false) { + // We should have already excluded this shard before routing to it. + // If we somehow land here, then we match nothing. + return new MatchNoDocsQuery("this shard is not part of the slice"); + } + boolean useTermQuery = false; if ("_uid".equals(field)) { throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); @@ -277,12 +291,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, // the number of slices is greater than the number of shards // in such case we can reduce the number of requested shards by slice - // first we check if the slice is responsible of this shard int targetShard = id % numShards; - if (targetShard != shardId) { - // the shard is not part of this slice, we can skip it. - return new MatchNoDocsQuery("this shard is not part of the slice"); - } // compute the number of slices where this shard appears int numSlicesInShard = max / numShards; int rest = max % numShards; @@ -301,14 +310,8 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, ? new TermsSliceQuery(field, shardSlice, numSlicesInShard) : new DocValuesSliceQuery(field, shardSlice, numSlicesInShard); } - // the number of shards is greater than the number of slices + // the number of shards is greater than the number of slices. If we target this shard, we target all of it. - // check if the shard is assigned to the slice - int targetSlice = shardId % max; - if (id != targetSlice) { - // the shard is not part of this slice, we can skip it. - return new MatchNoDocsQuery("this shard is not part of the slice"); - } return new MatchAllDocsQuery(); } @@ -321,6 +324,8 @@ private GroupShardsIterator buildShardIterator(ClusterService clu Map> routingMap = request.indexRoutings().length > 0 ? Collections.singletonMap(indices[0], Sets.newHashSet(request.indexRoutings())) : null; + // Note that we do *not* want to filter this set of shard IDs based on the slice, since we want the + // full set of shards matched by the routing parameters. 
return clusterService.operationRouting().searchShards(state, indices, routingMap, request.preference()); } diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index 84955d01a59ce..0a0015ae8cbf6 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -809,6 +809,7 @@ public void testCollectSearchShards() throws Exception { remoteIndicesByCluster, remoteClusterService, threadPool, + null, new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch) ); awaitLatch(latch, 5, TimeUnit.SECONDS); @@ -835,6 +836,7 @@ public void testCollectSearchShards() throws Exception { remoteIndicesByCluster, remoteClusterService, threadPool, + null, new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch) ); awaitLatch(latch, 5, TimeUnit.SECONDS); @@ -880,6 +882,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteIndicesByCluster, remoteClusterService, threadPool, + null, new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch) ); awaitLatch(latch, 5, TimeUnit.SECONDS); @@ -907,6 +910,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteIndicesByCluster, remoteClusterService, threadPool, + null, new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch) ); awaitLatch(latch, 5, TimeUnit.SECONDS); @@ -949,6 +953,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti remoteIndicesByCluster, remoteClusterService, threadPool, + null, new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch) ); awaitLatch(latch, 5, TimeUnit.SECONDS); diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index aaeeb52ab5709..4263e1aa347dc 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -604,7 +604,8 @@ public void testAdaptiveReplicaSelection() throws Exception { null, null, collector, - outstandingRequests + outstandingRequests, + null ); assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); @@ -616,7 +617,7 @@ public void testAdaptiveReplicaSelection() throws Exception { searchedShards.add(firstChoice); selectedNodes.add(firstChoice.currentNodeId()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); assertThat(groupIterator.size(), equalTo(numIndices * numShards)); ShardRouting secondChoice = groupIterator.get(0).nextOrNull(); @@ -624,7 +625,7 @@ public void testAdaptiveReplicaSelection() throws Exception { searchedShards.add(secondChoice); selectedNodes.add(secondChoice.currentNodeId()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, 
outstandingRequests, null); assertThat(groupIterator.size(), equalTo(numIndices * numShards)); ShardRouting thirdChoice = groupIterator.get(0).nextOrNull(); @@ -643,26 +644,26 @@ public void testAdaptiveReplicaSelection() throws Exception { outstandingRequests.put("node_1", 1L); outstandingRequests.put("node_2", 1L); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); ShardRouting shardChoice = groupIterator.get(0).nextOrNull(); // node 1 should be the lowest ranked node to start assertThat(shardChoice.currentNodeId(), equalTo("node_1")); // node 1 starts getting more loaded... collector.addNodeStatistics("node_1", 2, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); shardChoice = groupIterator.get(0).nextOrNull(); assertThat(shardChoice.currentNodeId(), equalTo("node_1")); // and more loaded... collector.addNodeStatistics("node_1", 3, TimeValue.timeValueMillis(250).nanos(), TimeValue.timeValueMillis(200).nanos()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); shardChoice = groupIterator.get(0).nextOrNull(); assertThat(shardChoice.currentNodeId(), equalTo("node_1")); // and even more collector.addNodeStatistics("node_1", 4, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); shardChoice = groupIterator.get(0).nextOrNull(); // finally, node 2 is chosen instead assertThat(shardChoice.currentNodeId(), equalTo("node_2")); @@ -709,7 +710,8 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except null, null, collector, - outstandingRequests + outstandingRequests, + null ); assertThat("One group per index shard", groupIterator.size(), equalTo(numIndices * numShards)); @@ -722,7 +724,7 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except searchedShards.add(firstChoice); selectedNodes.add(firstChoice.currentNodeId()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); assertThat(groupIterator.size(), equalTo(numIndices * numShards)); assertThat(groupIterator.get(0).size(), equalTo(numReplicas + 1)); @@ -745,18 +747,18 @@ public void testAdaptiveReplicaSelectionWithZoneAwarenessIgnored() throws Except outstandingRequests.put("node_a1", 1L); outstandingRequests.put("node_b2", 1L); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); // node_a0 or node_a1 should be the lowest ranked node to start groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), 
containsString("node_a"))); // Adding more load to node_a0 collector.addNodeStatistics("node_a0", 10, TimeValue.timeValueMillis(200).nanos(), TimeValue.timeValueMillis(150).nanos()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); // Adding more load to node_a0 and node_a1 from zone-a collector.addNodeStatistics("node_a1", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos()); collector.addNodeStatistics("node_a0", 100, TimeValue.timeValueMillis(300).nanos(), TimeValue.timeValueMillis(250).nanos()); - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); // ARS should pick node_b2 from zone-b since both node_a0 and node_a1 are overloaded groupIterator.forEach(shardRoutings -> assertThat(shardRoutings.nextOrNull().currentNodeId(), containsString("node_b"))); @@ -842,8 +844,8 @@ public void testWeightedOperationRouting() throws Exception { null, null, collector, - outstandingRequests - + outstandingRequests, + null ); for (ShardIterator it : groupIterator) { @@ -871,7 +873,7 @@ public void testWeightedOperationRouting() throws Exception { opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); // search shards call - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); for (ShardIterator it : groupIterator) { List shardRoutings = Collections.singletonList(it.nextOrNull()); @@ -935,8 +937,8 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep null, null, collector, - outstandingRequests - + outstandingRequests, + null ); for (ShardIterator it : groupIterator) { @@ -969,7 +971,7 @@ public void testWeightedOperationRoutingWeightUndefinedForOneZone() throws Excep opRouting = new OperationRouting(setting, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); // search shards call - groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests); + groupIterator = opRouting.searchShards(state, indexNames, null, null, collector, outstandingRequests, null); for (ShardIterator it : groupIterator) { while (it.remaining() > 0) { From fa4595cf853f2f55b6a4ffc9f653330f6a25688d Mon Sep 17 00:00:00 2001 From: kkewwei Date: Wed, 15 Jan 2025 23:25:55 +0800 Subject: [PATCH 29/37] Upgrade HttpCore5/HttpClient5 to support ExtendedSocketOption in HttpAsyncClient (#16757) * upgrade httpcore5/httpclient5 to support ExtendedSocketOption in HttpAsyncClient Signed-off-by: kkewwei Signed-off-by: kkewwei * Use the Upgrade flow by default Signed-off-by: Andriy Redko * Update Reactor Netty to 1.1.26.Final Signed-off-by: Andriy Redko * Add SETTING_H2C_MAX_CONTENT_LENGTH to configure h2cMaxContentLength for reactor-netty4 transport Signed-off-by: Andriy Redko * Update Apache HttpCore5 to 5.3.2 Signed-off-by: Andriy Redko --------- Signed-off-by: kkewwei Signed-off-by: kkewwei Signed-off-by: Andriy Redko Co-authored-by: Andriy Redko --- CHANGELOG-3.0.md | 1 + .../org/opensearch/bootstrap/test.policy | 11 +++++++++++ 
.../rest/licenses/httpclient5-5.3.1.jar.sha1 | 1 - .../rest/licenses/httpclient5-5.4.1.jar.sha1 | 1 + client/rest/licenses/httpcore5-5.2.5.jar.sha1 | 1 - client/rest/licenses/httpcore5-5.3.2.jar.sha1 | 1 + .../rest/licenses/httpcore5-h2-5.2.5.jar.sha1 | 1 - .../rest/licenses/httpcore5-h2-5.3.2.jar.sha1 | 1 + .../licenses/httpcore5-reactive-5.2.5.jar.sha1 | 1 - .../licenses/httpcore5-reactive-5.3.2.jar.sha1 | 1 + .../client/RestClientBuilderTests.java | 7 +++++++ .../client/RestClientSingleHostIntegTests.java | 10 +++++++++- .../documentation/RestClientDocumentation.java | 18 ++++++++++++++++++ .../licenses/httpclient5-5.3.1.jar.sha1 | 1 - .../licenses/httpclient5-5.4.1.jar.sha1 | 1 + .../sniffer/licenses/httpcore5-5.2.5.jar.sha1 | 1 - .../sniffer/licenses/httpcore5-5.3.2.jar.sha1 | 1 + gradle/libs.versions.toml | 6 +++--- .../reactor-netty-core-1.1.23.jar.sha1 | 1 - .../reactor-netty-core-1.1.26.jar.sha1 | 1 + .../reactor-netty-http-1.1.23.jar.sha1 | 1 - .../reactor-netty-http-1.1.26.jar.sha1 | 1 + .../reactor-netty-core-1.1.23.jar.sha1 | 1 - .../reactor-netty-core-1.1.26.jar.sha1 | 1 + .../reactor-netty-http-1.1.23.jar.sha1 | 1 - .../reactor-netty-http-1.1.26.jar.sha1 | 1 + .../ReactorNetty4HttpServerTransport.java | 17 +++++++++++++++++ .../transport/reactor/ReactorNetty4Plugin.java | 2 +- .../opensearch/bootstrap/test-framework.policy | 2 +- 29 files changed, 78 insertions(+), 16 deletions(-) create mode 100644 client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy delete mode 100644 client/rest/licenses/httpclient5-5.3.1.jar.sha1 create mode 100644 client/rest/licenses/httpclient5-5.4.1.jar.sha1 delete mode 100644 client/rest/licenses/httpcore5-5.2.5.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-5.3.2.jar.sha1 delete mode 100644 client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1 delete mode 100644 client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 create mode 100644 client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1 delete mode 100644 client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 create mode 100644 client/sniffer/licenses/httpclient5-5.4.1.jar.sha1 delete mode 100644 client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 create mode 100644 client/sniffer/licenses/httpcore5-5.3.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 diff --git a/CHANGELOG-3.0.md b/CHANGELOG-3.0.md index 48d978bede420..fddead96aaf45 100644 --- a/CHANGELOG-3.0.md +++ b/CHANGELOG-3.0.md @@ -15,6 +15,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Views, simplify data access and manipulation by providing a virtual layer over one or more indices ([#11957](https://github.com/opensearch-project/OpenSearch/pull/11957)) ### Dependencies +- Bump Apache HttpCore5/HttpClient5 dependencies 
from 5.2.5/5.3.1 to 5.3.1/5.4.1 to support ExtendedSocketOption in HttpAsyncClient ([#16757](https://github.com/opensearch-project/OpenSearch/pull/16757)) ### Changed - Changed locale provider from COMPAT to CLDR ([#14345](https://github.com/opensearch-project/OpenSearch/pull/14345)) diff --git a/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy new file mode 100644 index 0000000000000..2604c2492d8ab --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/opensearch/bootstrap/test.policy @@ -0,0 +1,11 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +grant { + permission java.net.SocketPermission "*", "connect,resolve"; +}; diff --git a/client/rest/licenses/httpclient5-5.3.1.jar.sha1 b/client/rest/licenses/httpclient5-5.3.1.jar.sha1 deleted file mode 100644 index c8f32c1ec23a1..0000000000000 --- a/client/rest/licenses/httpclient5-5.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56b53c8f4bcdaada801d311cf2ff8a24d6d96883 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.4.1.jar.sha1 b/client/rest/licenses/httpclient5-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..40156e9a42620 --- /dev/null +++ b/client/rest/licenses/httpclient5-5.4.1.jar.sha1 @@ -0,0 +1 @@ +ce913081e592ee8eeee35c4e577d7dce13cba7a4 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-5.2.5.jar.sha1 deleted file mode 100644 index ca97e8612ea39..0000000000000 --- a/client/rest/licenses/httpcore5-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-5.3.2.jar.sha1 new file mode 100644 index 0000000000000..44c13325b5647 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.3.2.jar.sha1 @@ -0,0 +1 @@ +35d387301d4a719972b15fbe863020da5f913c22 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 deleted file mode 100644 index bb40fe65854f6..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -09425df4d1365cee86a8e031a036bdca4343da4b \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1 new file mode 100644 index 0000000000000..67c92d8fea09c --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.3.2.jar.sha1 @@ -0,0 +1 @@ +d908a946e9161511accdc739e443b1e0b0cbba82 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 deleted file mode 100644 index ab9241fc93d45..0000000000000 --- a/client/rest/licenses/httpcore5-reactive-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f68949965075b957c12b4c1ef89fd4bab2a0fdb1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1 b/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1 new file mode 100644 index 0000000000000..345d71cb206ae --- /dev/null +++ b/client/rest/licenses/httpcore5-reactive-5.3.2.jar.sha1 @@ -0,0 +1 @@ +9ee35ef1d3e40855695fc87ad2e31192d85c1e88 \ No newline at end of file diff --git 
a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java index 7165174e688e1..c9ad10a476f74 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java @@ -37,6 +37,7 @@ import org.apache.hc.core5.http.Header; import org.apache.hc.core5.http.HttpHost; import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.reactor.IOReactorConfig; import org.apache.hc.core5.util.Timeout; import java.io.IOException; @@ -143,6 +144,12 @@ public void testBuild() throws IOException { builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) { + IOReactorConfig.Builder iOReactorConfig = IOReactorConfig.custom(); + iOReactorConfig.setTcpKeepCount(randomIntBetween(4, 10)); + iOReactorConfig.setTcpKeepInterval(randomIntBetween(5, 10)); + iOReactorConfig.setTcpKeepIdle(randomIntBetween(100, 200)); + iOReactorConfig.setIoThreadCount(2); + httpClientBuilder.setIOReactorConfig(iOReactorConfig.build()); return httpClientBuilder; } }); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index de04dd843b2db..84f6e7c8beb2e 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -382,6 +382,10 @@ public void testHeaders() throws Exception { if (method.equals("HEAD") == false) { standardHeaders.add("Content-length"); } + if (method.equals("HEAD") == true || method.equals("GET") == true || method.equals("OPTIONS") == true) { + standardHeaders.add("Upgrade"); + } + final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header"); final int statusCode = randomStatusCode(getRandom()); Request request = new Request(method, "/" + statusCode); @@ -400,11 +404,15 @@ public void testHeaders() throws Exception { assertEquals(method, esResponse.getRequestLine().getMethod()); assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); assertEquals(pathPrefix + "/" + statusCode, esResponse.getRequestLine().getUri()); + assertHeaders(defaultHeaders, requestHeaders, esResponse.getHeaders(), standardHeaders); + final Set removedHeaders = new HashSet<>(); for (final Header responseHeader : esResponse.getHeaders()) { String name = responseHeader.getName(); - if (name.startsWith("Header") == false) { + // Some headers could be returned multiple times in response, like Connection fe. 
+ if (name.startsWith("Header") == false && removedHeaders.contains(name) == false) { assertTrue("unknown header was returned " + name, standardHeaders.remove(name)); + removedHeaders.add(name); } } assertTrue("some expected standard headers weren't returned: " + standardHeaders, standardHeaders.isEmpty()); diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 42c31864e0578..d9c82307cae8a 100644 --- a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -376,6 +376,24 @@ public HttpAsyncClientBuilder customizeHttpClient( }); //end::rest-client-config-threads } + { + //tag::rest-client-config-tcpKeepIdle/tcpKeepInterval/tcpKeepCount + RestClientBuilder builder = RestClient.builder( + new HttpHost("localhost", 9200)) + .setHttpClientConfigCallback(new HttpClientConfigCallback() { + @Override + public HttpAsyncClientBuilder customizeHttpClient( + HttpAsyncClientBuilder httpClientBuilder) { + return httpClientBuilder.setIOReactorConfig( + IOReactorConfig.custom() + .setTcpKeepIdle(200) + .setTcpKeepInterval(10) + .setTcpKeepCount(10) + .build()); + } + }); + //end::rest-client-config-tcpKeepIdle/tcpKeepInterval/tcpKeepCount + } { //tag::rest-client-config-basic-auth final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); diff --git a/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 deleted file mode 100644 index c8f32c1ec23a1..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.3.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -56b53c8f4bcdaada801d311cf2ff8a24d6d96883 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1 new file mode 100644 index 0000000000000..40156e9a42620 --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.4.1.jar.sha1 @@ -0,0 +1 @@ +ce913081e592ee8eeee35c4e577d7dce13cba7a4 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 deleted file mode 100644 index ca97e8612ea39..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.2.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dab1e18842971a45ca8942491ce005ab86a028d7 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1 new file mode 100644 index 0000000000000..44c13325b5647 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.3.2.jar.sha1 @@ -0,0 +1 @@ +35d387301d4a719972b15fbe863020da5f913c22 \ No newline at end of file diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 1cd2f8d87e1d4..96f6178295f00 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -37,12 +37,12 @@ joda = "2.12.7" roaringbitmap = "1.3.0" # project reactor -reactor_netty = "1.1.23" +reactor_netty = "1.1.26" reactor = "3.5.20" # client dependencies -httpclient5 = "5.3.1" -httpcore5 = "5.2.5" +httpclient5 = "5.4.1" +httpcore5 = "5.3.2" httpclient = "4.5.14" httpcore = "4.4.16" httpasyncclient = "4.1.5" diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1 deleted file mode 100644 index 
8f56bb5165fa3..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 new file mode 100644 index 0000000000000..e64cc3645514f --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.26.jar.sha1 @@ -0,0 +1 @@ +05a8c6004161a4c1a9c0639b05387baab6efaa32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1 deleted file mode 100644 index 5bb3136f99e93..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94b294fa90aee2e88ad4337251e278aaac21362c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 new file mode 100644 index 0000000000000..035d2fb1c4c4c --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.26.jar.sha1 @@ -0,0 +1 @@ +41682e517e2808fc469d6b2b85fea48d0a7fe73b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1 deleted file mode 100644 index 8f56bb5165fa3..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7059b0c18ab7aa0fa9e08b48cb6a20b15c11478 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 new file mode 100644 index 0000000000000..e64cc3645514f --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.26.jar.sha1 @@ -0,0 +1 @@ +05a8c6004161a4c1a9c0639b05387baab6efaa32 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1 deleted file mode 100644 index 5bb3136f99e93..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -94b294fa90aee2e88ad4337251e278aaac21362c \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 new file mode 100644 index 0000000000000..035d2fb1c4c4c --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.26.jar.sha1 @@ -0,0 +1 @@ +41682e517e2808fc469d6b2b85fea48d0a7fe73b \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java index 3dcee4e8ec045..77648ed7e785c 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/http/reactor/netty4/ReactorNetty4HttpServerTransport.java @@ -18,6 +18,7 @@ 
import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.net.NetUtils; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.http.AbstractHttpServerTransport; @@ -87,6 +88,19 @@ public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTranspor private static final String SETTING_KEY_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = "http.netty.max_composite_buffer_components"; private static final ByteSizeValue MTU = new ByteSizeValue(Long.parseLong(System.getProperty("opensearch.net.mtu", "1500"))); + /** + * Configure the maximum length of the content of the HTTP/2.0 clear-text upgrade request. + * By default the server will reject an upgrade request with non-empty content, + * because the upgrade request is most likely a GET request. If the client sends + * a non-GET upgrade request, {@link #h2cMaxContentLength} specifies the maximum + * length of the content of the upgrade request. + */ + public static final Setting SETTING_H2C_MAX_CONTENT_LENGTH = Setting.byteSizeSetting( + "h2c.max_content_length", + new ByteSizeValue(65536, ByteSizeUnit.KB), + Property.NodeScope + ); + /** * The number of Reactor Netty HTTP workers */ @@ -133,6 +147,7 @@ public class ReactorNetty4HttpServerTransport extends AbstractHttpServerTranspor private final ByteSizeValue maxInitialLineLength; private final ByteSizeValue maxHeaderSize; private final ByteSizeValue maxChunkSize; + private final ByteSizeValue h2cMaxContentLength; private final SecureHttpTransportSettingsProvider secureHttpTransportSettingsProvider; private volatile SharedGroupFactory.SharedGroup sharedGroup; private volatile DisposableServer disposableServer; @@ -208,6 +223,7 @@ public ReactorNetty4HttpServerTransport( this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + this.h2cMaxContentLength = SETTING_H2C_MAX_CONTENT_LENGTH.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); this.secureHttpTransportSettingsProvider = secureHttpTransportSettingsProvider; } @@ -228,6 +244,7 @@ protected HttpServerChannel bind(InetSocketAddress socketAddress) throws Excepti .compress(true) .httpRequestDecoder( spec -> spec.maxChunkSize(maxChunkSize.bytesAsInt()) + .h2cMaxContentLength(h2cMaxContentLength.bytesAsInt()) .maxHeaderSize(maxHeaderSize.bytesAsInt()) .maxInitialLineLength(maxInitialLineLength.bytesAsInt()) .allowPartialChunks(false) diff --git a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java index 6e5b0215b58a4..90ed1fe729d3a 100644 --- a/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java +++ b/plugins/transport-reactor-netty4/src/main/java/org/opensearch/transport/reactor/ReactorNetty4Plugin.java @@ -57,7 +57,7 @@ public ReactorNetty4Plugin() {} */ @Override public List> getSettings() { - return Arrays.asList(/* no setting registered since we're picking the onces from Netty 4 transport */); + return Arrays.asList(ReactorNetty4HttpServerTransport.SETTING_H2C_MAX_CONTENT_LENGTH); } /** diff 
--git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index c62adda511140..e1a3b4618035e 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -120,7 +120,7 @@ grant codeBase "${codebase.httpcore5}" { grant codeBase "${codebase.httpclient5}" { // httpclient5 makes socket connections for rest tests - permission java.net.SocketPermission "*", "connect"; + permission java.net.SocketPermission "*", "connect,resolve"; }; grant codeBase "${codebase.httpcore-nio}" { From 73e11af21c440c326f3685f5b13b359cf41bba92 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 15 Jan 2025 11:51:10 -0500 Subject: [PATCH 30/37] Update version checks for backport (#17030) Signed-off-by: Michael Froh Signed-off-by: Andriy Redko Co-authored-by: Michael Froh --- .../test/search.aggregation/70_adjacency_matrix.yml | 6 +++--- .../bucket/adjacency/AdjacencyMatrixAggregationBuilder.java | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml index ccd194eff6f51..8b1956c6152d2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/70_adjacency_matrix.yml @@ -130,12 +130,12 @@ setup: --- "Show only intersections": - skip: - version: " - 2.99.99" - reason: "show_only_intersecting was added in 3.0.0" + version: " - 2.19.0" + reason: "show_only_intersecting was added in 2.19.0" features: node_selector - do: node_selector: - version: "3.0.0 - " + version: "2.19.0 - " search: index: test rest_total_hits_as_int: true diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index 1b6a7e1158b83..e4a454ee64609 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -188,7 +188,7 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { super(in); int filtersSize = in.readVInt(); separator = in.readString(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { showOnlyIntersecting = in.readBoolean(); } filters = new ArrayList<>(filtersSize); @@ -201,7 +201,7 @@ public AdjacencyMatrixAggregationBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(filters.size()); out.writeString(separator); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { out.writeBoolean(showOnlyIntersecting); } for (KeyedFilter keyedFilter : filters) { From 6202ab08980b39b301fef305a79138d779b92f56 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 15 Jan 2025 14:09:03 -0500 Subject: [PATCH 31/37] Fix versions and breaking API changes (#17031) Signed-off-by: Andriy Redko --- .../resources/rest-api-spec/test/search_shards/20_slice.yml | 2 +- 
.../admin/cluster/shards/ClusterSearchShardsRequest.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml index bf1a5429213df..dafb38df20157 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search_shards/20_slice.yml @@ -1,7 +1,7 @@ --- "Search shards with slice specified in body": - skip: - version: " - 2.99.99" + version: " - 2.18.99" reason: "Added slice body to search_shards in 2.19" - do: indices.create: diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index d4bf0efbd3eb5..06bd4da1931de 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -80,7 +80,7 @@ public ClusterSearchShardsRequest(StreamInput in) throws IOException { preference = in.readOptionalString(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_19_0)) { boolean hasSlice = in.readBoolean(); if (hasSlice) { sliceBuilder = new SliceBuilder(in); @@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(routing); out.writeOptionalString(preference); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_19_0)) { if (sliceBuilder != null) { out.writeBoolean(true); sliceBuilder.writeTo(out); From 34ef1462abd55e931961c08d21b1dfa5855b8121 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 15 Jan 2025 15:11:18 -0500 Subject: [PATCH 32/37] Bump com.nimbusds:nimbus-jose-jwt from 9.47 to 10.0.1 in /test/fixtures/hdfs-fixture (#17011) * Bump com.nimbusds:nimbus-jose-jwt in /test/fixtures/hdfs-fixture Bumps [com.nimbusds:nimbus-jose-jwt](https://bitbucket.org/connect2id/nimbus-jose-jwt) from 9.47 to 10.0.1. - [Changelog](https://bitbucket.org/connect2id/nimbus-jose-jwt/src/master/CHANGELOG.txt) - [Commits](https://bitbucket.org/connect2id/nimbus-jose-jwt/branches/compare/10.0.1..9.47) --- updated-dependencies: - dependency-name: com.nimbusds:nimbus-jose-jwt dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- CHANGELOG.md | 2 +- test/fixtures/hdfs-fixture/build.gradle | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fd5efdc986d1..241d88049214d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.azure:azure-storage-blob` from 12.23.0 to 12.28.1 ([#16501](https://github.com/opensearch-project/OpenSearch/pull/16501)) - Bump `org.apache.hadoop:hadoop-minicluster` from 3.4.0 to 3.4.1 ([#16550](https://github.com/opensearch-project/OpenSearch/pull/16550)) - Bump `org.apache.xmlbeans:xmlbeans` from 5.2.1 to 5.3.0 ([#16612](https://github.com/opensearch-project/OpenSearch/pull/16612), [#16854](https://github.com/opensearch-project/OpenSearch/pull/16854)) -- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 9.47 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.41.1 to 10.0.1 ([#16611](https://github.com/opensearch-project/OpenSearch/pull/16611), [#16807](https://github.com/opensearch-project/OpenSearch/pull/16807), [#17011](https://github.com/opensearch-project/OpenSearch/pull/17011)) - Bump `lycheeverse/lychee-action` from 2.0.2 to 2.2.0 ([#16610](https://github.com/opensearch-project/OpenSearch/pull/16610), [#16897](https://github.com/opensearch-project/OpenSearch/pull/16897)) - Bump `me.champeau.gradle.japicmp` from 0.4.4 to 0.4.5 ([#16614](https://github.com/opensearch-project/OpenSearch/pull/16614)) - Bump `mockito` from 5.14.1 to 5.14.2, `objenesis` from 3.2 to 3.3 and `bytebuddy` from 1.15.4 to 1.15.10 ([#16655](https://github.com/opensearch-project/OpenSearch/pull/16655)) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index bb2b7ebafdf81..fdbd3ed0d3571 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.jboss.xnio:xnio-nio:3.8.16.Final" api 'org.jline:jline:3.28.0' api 'org.apache.commons:commons-configuration2:2.11.0' - api 'com.nimbusds:nimbus-jose-jwt:9.47' + api 'com.nimbusds:nimbus-jose-jwt:10.0.1' api ('org.apache.kerby:kerb-admin:2.1.0') { exclude group: "org.jboss.xnio" exclude group: "org.jline" From 13159c1693d088417683f8d7f2a018ea7e8a6866 Mon Sep 17 00:00:00 2001 From: Mohit Godwani <81609427+mgodwan@users.noreply.github.com> Date: Thu, 16 Jan 2025 12:48:39 +0530 Subject: [PATCH 33/37] Remove user data from logs when not in debug/trace mode (#17007) * Remove user data from logs when not in debug/trace mode Signed-off-by: Mohit Godwani --- .../test/delete_by_query/50_wait_for_active_shards.yml | 2 +- .../test/reindex/60_wait_for_active_shards.yml | 2 +- .../test/update_by_query/50_consistency.yml | 2 +- .../action/support/WaitActiveShardCountIT.java | 4 ++-- .../support/replication/ReplicationOperation.java | 10 +--------- .../replication/TransportReplicationAction.java | 2 +- .../org/opensearch/action/update/UpdateHelper.java | 7 ++++++- 7 files changed, 13 insertions(+), 16 deletions(-) diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml 
b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml index ea8ed4df3e748..39cf36847f25d 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/delete_by_query/50_wait_for_active_shards.yml @@ -25,7 +25,7 @@ match_all: {} - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/ - do: indices.refresh: {} diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml index 3498e555d2879..a580c55a95130 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/reindex/60_wait_for_active_shards.yml @@ -25,7 +25,7 @@ dest: index: dest - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)\..Timeout\:.\[1s\]/ - do: reindex: diff --git a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml index 4a067580b54d3..e97eacc3c9c25 100644 --- a/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml +++ b/modules/reindex/src/yamlRestTest/resources/rest-api-spec/test/update_by_query/50_consistency.yml @@ -21,7 +21,7 @@ wait_for_active_shards: 4 timeout: 1s - match: - failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\],.request:.+/ + failures.0.cause.reason: /Not.enough.active.copies.to.meet.shard.count.of.\[4\].\(have.1,.needed.4\)..Timeout\:.\[1s\]/ - do: update_by_query: diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java index 08cffac8aac5d..c4ffbccf0ab99 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/WaitActiveShardCountIT.java @@ -76,7 +76,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); assertThat( e.getMessage(), - startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms], request:") + startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms]") ); // but really, all is well } @@ -120,7 +120,7 @@ public void testReplicationWaitsForActiveShardCount() throws Exception { startsWith( "[test][0] Not enough active copies to meet shard count of [" + ActiveShardCount.ALL - + "] (have 2, needed 3). Timeout: [100ms], request:" + + "] (have 2, needed 3). 
Timeout: [100ms]" ) ); // but really, all is well diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index 9f69d41d83f5b..12d3502184ac4 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -141,15 +141,7 @@ public void execute() throws Exception { final ShardRouting primaryRouting = primary.routingEntry(); final ShardId primaryId = primaryRouting.shardId(); if (activeShardCountFailure != null) { - finishAsFailed( - new UnavailableShardsException( - primaryId, - "{} Timeout: [{}], request: [{}]", - activeShardCountFailure, - request.timeout(), - request - ) - ); + finishAsFailed(new UnavailableShardsException(primaryId, "{} Timeout: [{}]", activeShardCountFailure, request.timeout())); return; } diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 49a96603f6802..637a7a31d78cc 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -1246,7 +1246,7 @@ void finishOnSuccess(Response response) { } void retryBecauseUnavailable(ShardId shardId, String message) { - retry(new UnavailableShardsException(shardId, "{} Timeout: [{}], request: [{}]", message, request.timeout(), request)); + retry(new UnavailableShardsException(shardId, "{} Timeout: [{}]", message, request.timeout())); } } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java index 19c32f9336df8..c02ec1fbb9cf0 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java @@ -58,6 +58,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.script.Script; import org.opensearch.script.ScriptService; +import org.opensearch.script.ScriptType; import org.opensearch.script.UpdateScript; import org.opensearch.search.lookup.SourceLookup; @@ -128,7 +129,11 @@ Tuple> executeScriptedUpsert(Map Date: Thu, 16 Jan 2025 11:19:28 -0800 Subject: [PATCH 34/37] [Bugfix] Fix cache maximum size settings not working properly with pluggable caching (#16636) * Fix cache size setting Signed-off-by: Peter Alfonsi * Changelog Signed-off-by: Peter Alfonsi * Deprecate original IRC size setting Signed-off-by: Peter Alfonsi * spotlessApply Signed-off-by: Peter Alfonsi * Addressed Ankit's comments Signed-off-by: Peter Alfonsi * Address Sagar's comment Signed-off-by: Peter Alfonsi --------- Signed-off-by: Peter Alfonsi Signed-off-by: Peter Alfonsi Signed-off-by: Ankit Jain Co-authored-by: Peter Alfonsi Co-authored-by: Ankit Jain --- CHANGELOG.md | 1 + .../common/tier/TieredSpilloverCache.java | 15 ++ .../tier/TieredSpilloverCacheSettings.java | 6 + .../cache/common/tier/MockDiskCache.java | 4 + .../tier/TieredSpilloverCacheTests.java | 135 +++++++++++++++++- .../cache/EhcacheDiskCacheSettings.java | 1 + .../cache/store/disk/EhcacheDiskCache.java | 5 + .../store/disk/EhCacheDiskCacheTests.java | 61 ++++++++ .../common/cache/service/CacheService.java | 22 ++- 
.../cache/store/OpenSearchOnHeapCache.java | 14 +- .../OpenSearchOnHeapCacheSettings.java | 1 + .../indices/IndicesRequestCache.java | 74 ++++++---- .../store/OpenSearchOnHeapCacheTests.java | 75 +++++++--- .../settings/MemorySizeSettingsTests.java | 3 + .../indices/IndicesRequestCacheTests.java | 48 +++++++ 15 files changed, 403 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 241d88049214d..21cdf30867e74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -95,6 +95,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix case insensitive and escaped query on wildcard ([#16827](https://github.com/opensearch-project/OpenSearch/pull/16827)) - Fix _list/shards API failing when closed indices are present ([#16606](https://github.com/opensearch-project/OpenSearch/pull/16606)) - Fix remote shards balance ([#15335](https://github.com/opensearch-project/OpenSearch/pull/15335)) +- Fix max request cache size settings not working properly with pluggable caching ([#16636](https://github.com/opensearch-project/OpenSearch/pull/16636)) - Always use `constant_score` query for `match_only_text` field ([#16964](https://github.com/opensearch-project/OpenSearch/pull/16964)) - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java index 38a6915ffd10e..9879235812377 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCache.java @@ -150,6 +150,9 @@ static class TieredSpilloverCacheSegment implements ICache { private final TieredSpilloverCacheStatsHolder statsHolder; + private final long onHeapCacheMaxWeight; + private final long diskCacheMaxWeight; + /** * This map is used to handle concurrent requests for same key in computeIfAbsent() to ensure we load the value * only once. @@ -218,6 +221,8 @@ static class TieredSpilloverCacheSegment implements ICache { cacheListMap.put(diskCache, new TierInfo(isDiskCacheEnabled, TIER_DIMENSION_VALUE_DISK)); this.caches = Collections.synchronizedMap(cacheListMap); this.policies = builder.policies; // Will never be null; builder initializes it to an empty list + this.onHeapCacheMaxWeight = onHeapCacheSizeInBytes; + this.diskCacheMaxWeight = diskCacheSizeInBytes; } // Package private for testing @@ -526,6 +531,16 @@ void updateStatsOnPut(String destinationTierValue, ICacheKey key, V value) { statsHolder.incrementSizeInBytes(dimensionValues, weigher.applyAsLong(key, value)); } + // pkg-private for testing + long getOnHeapCacheMaxWeight() { + return onHeapCacheMaxWeight; + } + + // pkg-private for testing + long getDiskCacheMaxWeight() { + return diskCacheMaxWeight; + } + /** * A class which receives removal events from the heap tier. 
*/ diff --git a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java index 122d00af3bd1e..31dc1795134e4 100644 --- a/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java +++ b/modules/cache-common/src/main/java/org/opensearch/cache/common/tier/TieredSpilloverCacheSettings.java @@ -85,6 +85,9 @@ public class TieredSpilloverCacheSettings { /** * Setting which defines the onHeap cache size to be used within tiered cache. + * This setting overrides size settings from the heap tier implementation. + * For example, if OpenSearchOnHeapCache is the heap tier in the request cache, and + * indices.requests.cache.opensearch_onheap.size is set, that value will be ignored in favor of this setting. * * Pattern: {cache_type}.tiered_spillover.onheap.store.size * Example: indices.request.cache.tiered_spillover.onheap.store.size @@ -96,6 +99,9 @@ public class TieredSpilloverCacheSettings { /** * Setting which defines the disk cache size to be used within tiered cache. + * This setting overrides the size setting from the disk tier implementation. + * For example, if EhcacheDiskCache is the disk tier in the request cache, and + * indices.requests.cache.ehcache_disk.max_size_in_bytes is set, that value will be ignored in favor of this setting. */ public static final Setting.AffixSetting TIERED_SPILLOVER_DISK_STORE_SIZE = Setting.suffixKeySetting( TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ".disk.store.size", diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java index fcddd489a27aa..78302cede402f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/MockDiskCache.java @@ -128,6 +128,10 @@ public void close() { } + long getMaximumWeight() { + return maxSize; + } + public static class MockDiskCacheFactory implements Factory { public static final String NAME = "mockDiskCache"; diff --git a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java index 3bb1321f9faf2..494534ac74c9f 100644 --- a/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java +++ b/modules/cache-common/src/test/java/org/opensearch/cache/common/tier/TieredSpilloverCacheTests.java @@ -58,6 +58,7 @@ import static org.opensearch.cache.common.tier.TieredSpilloverCache.ZERO_SEGMENT_COUNT_EXCEPTION_MESSAGE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.DISK_CACHE_ENABLED_SETTING_MAP; +import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.MIN_DISK_CACHE_SIZE_IN_BYTES; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS; import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP; @@ -2166,6 +2167,134 @@ public void testDropStatsForDimensions() throws Exception { assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), 
tieredSpilloverCache.stats().getTotalStats()); } + public void testSegmentSizesWhenUsingFactory() { + // The TSC's tier size settings, TIERED_SPILLOVER_ONHEAP_STORE_SIZE and TIERED_SPILLOVER_DISK_STORE_SIZE, + // should always be respected, overriding the individual implementation's size settings if present + long expectedHeapSize = 256L * between(10, 20); + long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40); + long heapSizeFromImplSetting = 50; + int diskSizeFromImplSetting = 50; + int numSegments = getNumberOfSegments(); + + int keyValueSize = 1; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + Settings settings = Settings.builder() + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_NAME.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + MockDiskCache.MockDiskCacheFactory.NAME + ) + // These two size settings should be honored + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + expectedHeapSize + "b" + ) + .put( + TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + expectedDiskSize + ) + // The size setting from the OpenSearchOnHeap implementation should not be honored + .put( + OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + heapSizeFromImplSetting + "b" + ) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") + .put( + TIERED_SPILLOVER_SEGMENTS.getConcreteSettingForNamespace(CacheType.INDICES_REQUEST_CACHE.getSettingPrefix()).getKey(), + numSegments + ) + .build(); + String storagePath = getStoragePath(settings); + + TieredSpilloverCache tieredSpilloverCache = (TieredSpilloverCache< + String, + String>) new TieredSpilloverCache.TieredSpilloverCacheFactory().create( + new CacheConfig.Builder().setKeyType(String.class) + .setKeyType(String.class) + .setWeigher((k, v) -> keyValueSize) + .setRemovalListener(removalListener) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setSettings(settings) + .setDimensionNames(dimensionNames) + .setCachedResultParser(s -> new CachedQueryResult.PolicyValues(20_000_000L)) // Values will always appear to have taken + // 20_000_000 ns = 20 ms to compute + .setClusterSettings(clusterSettings) + .setStoragePath(storagePath) + .build(), + CacheType.INDICES_REQUEST_CACHE, + Map.of( + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME, + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + MockDiskCache.MockDiskCacheFactory.NAME, + // The size value passed in here acts as the "implementation setting" for the disk tier, and should also be ignored + new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, false, keyValueSize) + ) + ); + checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize); + } + + public void 
testSegmentSizesWhenNotUsingFactory() { + long expectedHeapSize = 256L * between(10, 20); + long expectedDiskSize = MIN_DISK_CACHE_SIZE_IN_BYTES + 256L * between(30, 40); + int heapSizeFromImplSetting = 50; + int diskSizeFromImplSetting = 50; + + Settings settings = Settings.builder() + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + TieredSpilloverCache.TieredSpilloverCacheFactory.TIERED_SPILLOVER_CACHE_NAME + ) + .put(FeatureFlags.PLUGGABLE_CACHE, "true") + // The size setting from the OpenSearchOnHeapCache implementation should not be honored + .put( + OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), + heapSizeFromImplSetting + "b" + ) + .build(); + + int keyValueSize = 1; + MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); + int numSegments = getNumberOfSegments(); + CacheConfig cacheConfig = getCacheConfig(1, settings, removalListener, numSegments); + TieredSpilloverCache tieredSpilloverCache = getTieredSpilloverCache( + new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(), + new MockDiskCache.MockDiskCacheFactory(0, diskSizeFromImplSetting, true, keyValueSize), + cacheConfig, + null, + removalListener, + numSegments, + expectedHeapSize, + expectedDiskSize + ); + checkSegmentSizes(tieredSpilloverCache, expectedHeapSize, expectedDiskSize); + } + + private void checkSegmentSizes(TieredSpilloverCache cache, long expectedHeapSize, long expectedDiskSize) { + TieredSpilloverCache.TieredSpilloverCacheSegment segment = cache.tieredSpilloverCacheSegments[0]; + assertEquals(expectedHeapSize / cache.getNumberOfSegments(), segment.getOnHeapCacheMaxWeight()); + assertEquals(expectedDiskSize / cache.getNumberOfSegments(), segment.getDiskCacheMaxWeight()); + } + private List getMockDimensions() { List dims = new ArrayList<>(); for (String dimensionName : dimensionNames) { @@ -2455,9 +2584,9 @@ private void verifyComputeIfAbsentThrowsException( MockCacheRemovalListener removalListener = new MockCacheRemovalListener<>(); Settings settings = Settings.builder() .put( - OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) - .get(MAXIMUM_SIZE_IN_BYTES_KEY) - .getKey(), + TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE.getConcreteSettingForNamespace( + CacheType.INDICES_REQUEST_CACHE.getSettingPrefix() + ).getKey(), onHeapCacheSize * keyValueSize + "b" ) .build(); diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java index cbc104f2d0b00..e4c9dd1e96c3c 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/EhcacheDiskCacheSettings.java @@ -101,6 +101,7 @@ public class EhcacheDiskCacheSettings { /** * Disk cache max size setting. + * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored. 
*/ public static final Setting.AffixSetting DISK_CACHE_MAX_SIZE_IN_BYTES_SETTING = Setting.suffixKeySetting( EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ".max_size_in_bytes", diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java index 0fa0f8162bb98..33c27eb301ad1 100644 --- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java +++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java @@ -680,6 +680,11 @@ private V deserializeValue(ByteArrayWrapper binary) { return valueSerializer.deserialize(binary.value); } + // Pkg-private for testing. + long getMaximumWeight() { + return maxWeightInBytes; + } + /** * Factory to create an ehcache disk cache. */ diff --git a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java index a0d0aa4ec4914..4e879af052c15 100644 --- a/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java +++ b/plugins/cache-ehcache/src/test/java/org/opensearch/cache/store/disk/EhCacheDiskCacheTests.java @@ -20,11 +20,13 @@ import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.serializer.BytesReferenceSerializer; import org.opensearch.common.cache.serializer.Serializer; +import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; @@ -1201,6 +1203,65 @@ public void testEhcacheCloseWithDestroyCacheMethodThrowingException() throws Exc ehcacheDiskCache.close(); } + public void testWithCacheConfigSizeSettings() throws Exception { + // The cache should get its size from the config if present, and otherwise should get it from the setting. + long maxSizeFromSetting = between(MINIMUM_MAX_SIZE_IN_BYTES + 1000, MINIMUM_MAX_SIZE_IN_BYTES + 2000); + long maxSizeFromConfig = between(MINIMUM_MAX_SIZE_IN_BYTES + 3000, MINIMUM_MAX_SIZE_IN_BYTES + 4000); + + EhcacheDiskCache cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, false); + assertEquals(maxSizeFromSetting, cache.getMaximumWeight()); + + cache = setupMaxSizeTest(maxSizeFromSetting, maxSizeFromConfig, true); + assertEquals(maxSizeFromConfig, cache.getMaximumWeight()); + } + + // Modified from OpenSearchOnHeapCacheTests. Can't reuse, as we can't add a dependency on the server.test module. 
+ private EhcacheDiskCache setupMaxSizeTest(long maxSizeFromSetting, long maxSizeFromConfig, boolean putSizeInConfig) + throws Exception { + MockRemovalListener listener = new MockRemovalListener<>(); + try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) { + Settings settings = Settings.builder() + .put(FeatureFlags.PLUGGABLE_CACHE, true) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + EhcacheDiskCache.EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_MAX_SIZE_IN_BYTES_KEY) + .getKey(), + maxSizeFromSetting + ) + .put( + EhcacheDiskCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(DISK_STORAGE_PATH_KEY) + .getKey(), + env.nodePaths()[0].indicesPath.toString() + "/request_cache/" + 0 + ) + .build(); + + CacheConfig.Builder cacheConfigBuilder = new CacheConfig.Builder().setKeyType(String.class) + .setValueType(String.class) + .setKeySerializer(new StringSerializer()) + .setValueSerializer(new StringSerializer()) + .setWeigher(getWeigher()) + .setRemovalListener(listener) + .setSettings(settings) + .setDimensionNames(List.of(dimensionName)) + .setStatsTrackingEnabled(true); + if (putSizeInConfig) { + cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig); + } + + ICache.Factory cacheFactory = new EhcacheDiskCache.EhcacheDiskCacheFactory(); + return (EhcacheDiskCache) cacheFactory.create( + cacheConfigBuilder.build(), + CacheType.INDICES_REQUEST_CACHE, + null + ); + } + } + static class MockEhcahceDiskCache extends EhcacheDiskCache { public MockEhcahceDiskCache(Builder builder) { diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java index 01da78ecec52e..da006264094d2 100644 --- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java +++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java @@ -46,11 +46,8 @@ public CacheService(Map cacheStoreTypeFactories, Setting } public ICache createCache(CacheConfig config, CacheType cacheType) { - Setting cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( - cacheType.getSettingPrefix() - ); - String storeName = cacheSettingForCacheType.get(settings); - if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + String storeName = getStoreNameFromSetting(cacheType, settings); + if (!pluggableCachingEnabled(cacheType, settings)) { // Condition 1: In case feature flag is off, we default to onHeap. // Condition 2: In case storeName is not explicitly mentioned, we assume user is looking to use older // settings, so we again fallback to onHeap to maintain backward compatibility. @@ -74,4 +71,19 @@ public NodeCacheStats stats(CommonStatsFlags flags) { } return new NodeCacheStats(statsMap, flags); } + + /** + * Check if pluggable caching is on, and if a store type is present for this cache type. 
+ */ + public static boolean pluggableCachingEnabled(CacheType cacheType, Settings settings) { + String storeName = getStoreNameFromSetting(cacheType, settings); + return FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) && storeName != null && !storeName.isBlank(); + } + + private static String getStoreNameFromSetting(CacheType cacheType, Settings settings) { + Setting cacheSettingForCacheType = CacheSettings.CACHE_TYPE_STORE_NAME.getConcreteSettingForNamespace( + cacheType.getSettingPrefix() + ); + return cacheSettingForCacheType.get(settings); + } } diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java index 571383a9fce6a..e1039c5d9ee55 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java +++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java @@ -17,6 +17,7 @@ import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; +import org.opensearch.common.cache.service.CacheService; import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.CacheStatsHolder; import org.opensearch.common.cache.stats.DefaultCacheStatsHolder; @@ -80,7 +81,7 @@ public OpenSearchOnHeapCache(Builder builder) { this.weigher = builder.getWeigher(); } - // package private for testing + // pkg-private for testing long getMaximumWeight() { return this.maximumWeight; } @@ -192,8 +193,12 @@ public ICache create(CacheConfig config, CacheType cacheType, ); long maxSizeInBytes = ((ByteSizeValue) settingList.get(MAXIMUM_SIZE_IN_BYTES_KEY).get(settings)).getBytes(); - if (config.getMaxSizeInBytes() > 0) { // If this is passed from upstream(like tieredCache), then use this - // instead. + if (config.getMaxSizeInBytes() > 0) { + /* + Use the cache config value if present. + This can be passed down from the TieredSpilloverCache when creating individual segments, + but is not passed in from the IRC if pluggable caching is on. + */ builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); } else { builder.setMaximumWeightInBytes(maxSizeInBytes); @@ -204,8 +209,7 @@ public ICache create(CacheConfig config, CacheType cacheType, builder.setNumberOfSegments(-1); // By default it will use 256 segments. } - String storeName = cacheSettingForCacheType.get(settings); - if (!FeatureFlags.PLUGGABLE_CACHE_SETTING.get(settings) || (storeName == null || storeName.isBlank())) { + if (!CacheService.pluggableCachingEnabled(cacheType, settings)) { // For backward compatibility as the user intent is to use older settings. builder.setMaximumWeightInBytes(config.getMaxSizeInBytes()); builder.setExpireAfterAccess(config.getExpireAfterAccess()); diff --git a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java index 5a2964ad011bf..8ba356f9e0597 100644 --- a/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java +++ b/server/src/main/java/org/opensearch/common/cache/store/settings/OpenSearchOnHeapCacheSettings.java @@ -26,6 +26,7 @@ public class OpenSearchOnHeapCacheSettings { /** * Setting to define maximum size for the cache as a percentage of heap memory available. 
+ * If this cache is used as a tier in a TieredSpilloverCache, this setting is ignored. * * Setting pattern: {cache_type}.opensearch_onheap.size */ diff --git a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java index 3d158cb60a208..4f42cd8fe8672 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/opensearch/indices/IndicesRequestCache.java @@ -124,10 +124,18 @@ public final class IndicesRequestCache implements RemovalListener INDICES_CACHE_QUERY_SIZE = Setting.memorySizeSetting( "indices.requests.cache.size", "1%", - Property.NodeScope + Property.NodeScope, + Property.Deprecated ); public static final Setting INDICES_CACHE_QUERY_EXPIRE = Setting.positiveTimeSetting( "indices.requests.cache.expire", @@ -166,7 +174,6 @@ public final class IndicesRequestCache implements RemovalListener registeredClosedListeners = ConcurrentCollections.newConcurrentMap(); - private final ByteSizeValue size; private final TimeValue expire; private final ICache cache; private final ClusterService clusterService; @@ -187,10 +194,7 @@ public final class IndicesRequestCache implements RemovalListener, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed(); this.cacheCleanupManager = new IndicesRequestCacheCleanupManager( threadPool, INDICES_REQUEST_CACHE_CLEANUP_INTERVAL_SETTING.get(settings), @@ -200,30 +204,42 @@ public final class IndicesRequestCache implements RemovalListener().setSettings(settings) - .setWeigher(weigher) - .setValueType(BytesReference.class) - .setKeyType(Key.class) - .setRemovalListener(this) - .setMaxSizeInBytes(sizeInBytes) // for backward compatibility - .setExpireAfterAccess(expire) // for backward compatibility - .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME)) - .setCachedResultParser((bytesReference) -> { - try { - return CachedQueryResult.getPolicyValues(bytesReference); - } catch (IOException e) { - // Set took time to -1, which will always be rejected by the policy. - return new CachedQueryResult.PolicyValues(-1); - } - }) - .setKeySerializer(new IRCKeyWriteableSerializer()) - .setValueSerializer(new BytesReferenceSerializer()) - .setClusterSettings(clusterService.getClusterSettings()) - .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache") - .build(), - CacheType.INDICES_REQUEST_CACHE - ); + + CacheConfig config = getCacheConfig(settings, nodeEnvironment); + this.cache = cacheService.createCache(config, CacheType.INDICES_REQUEST_CACHE); + } + + // pkg-private for testing + CacheConfig getCacheConfig(Settings settings, NodeEnvironment nodeEnvironment) { + long sizeInBytes = INDICES_CACHE_QUERY_SIZE.get(settings).getBytes(); + ToLongBiFunction, BytesReference> weigher = (k, v) -> k.ramBytesUsed(k.key.ramBytesUsed()) + v.ramBytesUsed(); + CacheConfig.Builder configBuilder = new CacheConfig.Builder().setSettings(settings) + .setWeigher(weigher) + .setValueType(BytesReference.class) + .setKeyType(Key.class) + .setRemovalListener(this) + .setExpireAfterAccess(expire) // for backward compatibility + .setDimensionNames(List.of(INDEX_DIMENSION_NAME, SHARD_ID_DIMENSION_NAME)) + .setCachedResultParser((bytesReference) -> { + try { + return CachedQueryResult.getPolicyValues(bytesReference); + } catch (IOException e) { + // Set took time to -1, which will always be rejected by the policy. 
+ return new CachedQueryResult.PolicyValues(-1); + } + }) + .setKeySerializer(new IRCKeyWriteableSerializer()) + .setValueSerializer(new BytesReferenceSerializer()) + .setClusterSettings(clusterService.getClusterSettings()) + .setStoragePath(nodeEnvironment.nodePaths()[0].path.toString() + "/request_cache"); + + if (!CacheService.pluggableCachingEnabled(CacheType.INDICES_REQUEST_CACHE, settings)) { + // If pluggable caching is not enabled, use the max size based on the IRC setting into the config. + // If pluggable caching is enabled, cache implementations instead determine their own sizes based on their own implementation + // size settings. + configBuilder.setMaxSizeInBytes(sizeInBytes); + } + return configBuilder.build(); } // package private for testing diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java index 45a7b273eb41e..5a989ad8ab777 100644 --- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java +++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java @@ -15,6 +15,7 @@ import org.opensearch.common.cache.LoadAwareCacheLoader; import org.opensearch.common.cache.RemovalListener; import org.opensearch.common.cache.RemovalNotification; +import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; import org.opensearch.common.cache.store.config.CacheConfig; @@ -105,35 +106,69 @@ public void testStatsWithoutPluggableCaches() throws Exception { } } - public void testWithCacheConfigSettings() { - MockRemovalListener listener = new MockRemovalListener<>(); - int maxKeys = between(10, 50); - ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); - Settings settings = Settings.builder() - .put( - OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) - .get(MAXIMUM_SIZE_IN_BYTES_KEY) - .getKey(), - 1000 + "b" // Setting some random value which shouldn't be honored. - ) + public void testWithCacheConfigSizeSettings_WhenPluggableCachingOff() { + // The "pluggable caching off" case can happen when the PLUGGABLE_CACHE setting is false, or if the store name is blank. + // The cache should get its size from the config, not the setting, in either case. + Settings.Builder settingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, false); + long maxSizeFromSetting = between(1000, 2000); + long maxSizeFromConfig = between(3000, 4000); + OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true); + assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight()); + + Settings.Builder storeNameBlankSettingsBuilder = Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, true); + onHeapCache = setupMaxSizeTest(storeNameBlankSettingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true); + assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight()); + } + + public void testWithCacheConfigSettings_WhenPluggableCachingOn() { + // When pluggable caching is on, the cache should get its size from the config if present, and otherwise should get it from the + // setting. 
+ Settings.Builder settingsBuilder = Settings.builder() .put(FeatureFlags.PLUGGABLE_CACHE, true) - .build(); + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ); + long maxSizeFromSetting = between(1000, 2000); + long maxSizeFromConfig = between(3000, 4000); + OpenSearchOnHeapCache onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, false); + assertEquals(maxSizeFromSetting, onHeapCache.getMaximumWeight()); + + onHeapCache = setupMaxSizeTest(settingsBuilder, maxSizeFromSetting, maxSizeFromConfig, true); + assertEquals(maxSizeFromConfig, onHeapCache.getMaximumWeight()); + } - CacheConfig cacheConfig = new CacheConfig.Builder().setKeyType(String.class) + private OpenSearchOnHeapCache setupMaxSizeTest( + Settings.Builder settingsBuilder, + long maxSizeFromSetting, + long maxSizeFromConfig, + boolean putSizeInConfig + ) { + MockRemovalListener listener = new MockRemovalListener<>(); + settingsBuilder.put( + OpenSearchOnHeapCacheSettings.getSettingListForCacheType(CacheType.INDICES_REQUEST_CACHE) + .get(MAXIMUM_SIZE_IN_BYTES_KEY) + .getKey(), + maxSizeFromSetting + "b" + ); + + CacheConfig.Builder cacheConfigBuilder = new CacheConfig.Builder().setKeyType(String.class) .setValueType(String.class) .setWeigher((k, v) -> keyValueSize) .setRemovalListener(listener) - .setSettings(settings) + .setSettings(settingsBuilder.build()) .setDimensionNames(dimensionNames) - .setMaxSizeInBytes(maxKeys * keyValueSize) // this should get honored - .setStatsTrackingEnabled(true) - .build(); - OpenSearchOnHeapCache onHeapCache = (OpenSearchOnHeapCache) onHeapCacheFactory.create( - cacheConfig, + .setStatsTrackingEnabled(true); + if (putSizeInConfig) { + cacheConfigBuilder.setMaxSizeInBytes(maxSizeFromConfig); + } + + ICache.Factory onHeapCacheFactory = new OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory(); + return (OpenSearchOnHeapCache) onHeapCacheFactory.create( + cacheConfigBuilder.build(), CacheType.INDICES_REQUEST_CACHE, null ); - assertEquals(maxKeys * keyValueSize, onHeapCache.getMaximumWeight()); } private void assertZeroStats(ImmutableCacheStatsHolder stats) { diff --git a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java index 78782112be844..c90924cfc0fd1 100644 --- a/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/MemorySizeSettingsTests.java @@ -81,6 +81,9 @@ public void testIndicesRequestCacheSetting() { "indices.requests.cache.size", new ByteSizeValue((long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.01)) ); + assertWarnings( + "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." 
+ ); } public void testCircuitBreakerSettings() { diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java index 1a3aece74b3e2..e83ca247b6a1d 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java @@ -53,12 +53,16 @@ import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.cluster.routing.UnassignedInfo; import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.cache.CacheType; import org.opensearch.common.cache.ICacheKey; import org.opensearch.common.cache.RemovalNotification; import org.opensearch.common.cache.RemovalReason; import org.opensearch.common.cache.module.CacheModule; +import org.opensearch.common.cache.settings.CacheSettings; import org.opensearch.common.cache.stats.ImmutableCacheStats; import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder; +import org.opensearch.common.cache.store.OpenSearchOnHeapCache; +import org.opensearch.common.cache.store.config.CacheConfig; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; @@ -852,6 +856,42 @@ public void testAddingToCleanupKeyToCountMapWorksAppropriatelyWithMultipleThread assertFalse(concurrentModificationExceptionDetected.get()); } + public void testCacheMaxSize_WhenPluggableCachingOff() throws Exception { + // If pluggable caching is off, the IRC should put a max size value into the cache config that it uses to create its cache. + threadPool = getThreadPool(); + long cacheSize = 1000; + Settings settings = Settings.builder().put(INDICES_CACHE_QUERY_SIZE.getKey(), cacheSize + "b").build(); + cache = getIndicesRequestCache(settings); + CacheConfig config; + try (NodeEnvironment env = newNodeEnvironment(settings)) { + // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor + config = cache.getCacheConfig(settings, env); + } + assertEquals(cacheSize, (long) config.getMaxSizeInBytes()); + allowDeprecationWarning(); + } + + public void testCacheMaxSize_WhenPluggableCachingOn() throws Exception { + // If pluggable caching is on, and a store name is present, the IRC should NOT put a max size value into the cache config. 
+ threadPool = getThreadPool(); + Settings settings = Settings.builder() + .put(INDICES_CACHE_QUERY_SIZE.getKey(), 1000 + "b") + .put(FeatureFlags.PLUGGABLE_CACHE, true) + .put( + CacheSettings.getConcreteStoreNameSettingForCacheType(CacheType.INDICES_REQUEST_CACHE).getKey(), + OpenSearchOnHeapCache.OpenSearchOnHeapCacheFactory.NAME + ) + .build(); + cache = getIndicesRequestCache(settings); + CacheConfig config; + try (NodeEnvironment env = newNodeEnvironment(settings)) { + // For the purposes of this test it doesn't matter if the node environment matches the one used in the constructor + config = cache.getCacheConfig(settings, env); + } + assertEquals(0, (long) config.getMaxSizeInBytes()); + allowDeprecationWarning(); + } + private IndicesRequestCache getIndicesRequestCache(Settings settings) throws IOException { IndicesService indicesService = getInstanceFromNode(IndicesService.class); try (NodeEnvironment env = newNodeEnvironment(settings)) { @@ -1095,6 +1135,7 @@ public void testEviction() throws Exception { assertEquals(2, cache.count()); assertEquals(1, indexShard.requestCache().stats().getEvictions()); IOUtils.close(reader, secondReader, thirdReader, environment); + allowDeprecationWarning(); } public void testClearAllEntityIdentity() throws Exception { @@ -1372,6 +1413,7 @@ public void testGetOrComputeConcurrentlyWithMultipleIndices() throws Exception { } IOUtils.close(cache); executorService.shutdownNow(); + allowDeprecationWarning(); } public void testDeleteAndCreateIndexShardOnSameNodeAndVerifyStats() throws Exception { @@ -1540,6 +1582,12 @@ public static String generateString(int length) { return sb.toString(); } + private void allowDeprecationWarning() { + assertWarnings( + "[indices.requests.cache.size] setting was deprecated in OpenSearch and will be removed in a future release! See the breaking changes documentation for the next major version." 
+ ); + } + private class TestBytesReference extends AbstractBytesReference { int dummyValue; From abb81120ab4cc9b61e91f5b57a797409a951e3d7 Mon Sep 17 00:00:00 2001 From: Karen X Date: Thu, 16 Jan 2025 15:23:29 -0500 Subject: [PATCH 35/37] Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology (#17037) * [GRPC] Rename AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology Signed-off-by: Karen Xu * Update CHANGELOG Signed-off-by: Karen Xu * remove extra space in CHANGELOG Signed-off-by: Karen Xu * Update plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java Co-authored-by: Andriy Redko Signed-off-by: Karen X --------- Signed-off-by: Karen Xu Signed-off-by: Karen X Co-authored-by: Andriy Redko --- CHANGELOG.md | 1 + .../opensearch/transport/grpc/GrpcPlugin.java | 4 ++-- .../grpc/Netty4GrpcServerTransport.java | 17 +++++++++-------- .../grpc/Netty4GrpcServerTransportTests.java | 2 +- .../java/org/opensearch/bootstrap/Security.java | 6 +++--- .../org/opensearch/plugins/NetworkPlugin.java | 4 ++-- 6 files changed, 18 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 21cdf30867e74..9d9b9e54c3640 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix Shallow copy snapshot failures on closed index ([#16868](https://github.com/opensearch-project/OpenSearch/pull/16868)) - Fix multi-value sort for unsigned long ([#16732](https://github.com/opensearch-project/OpenSearch/pull/16732)) - The `phone-search` analyzer no longer emits the tel/sip prefix, international calling code, extension numbers and unformatted input as a token ([#16993](https://github.com/opensearch-project/OpenSearch/pull/16993)) +- Fix GRPC AUX_TRANSPORT_PORT and SETTING_GRPC_PORT settings and remove lingering HTTP terminology ([#17037](https://github.com/opensearch-project/OpenSearch/pull/17037)) ### Security diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java index 0a464e135350b..7f02983010f98 100644 --- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java +++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/GrpcPlugin.java @@ -25,7 +25,7 @@ import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.GRPC_TRANSPORT_SETTING_KEY; import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_BIND_HOST; import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_HOST; -import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORTS; +import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PORT; import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_HOST; import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_PUBLISH_PORT; import static org.opensearch.transport.grpc.Netty4GrpcServerTransport.SETTING_GRPC_WORKER_COUNT; @@ -58,7 +58,7 @@ public Map> getAuxTransports( @Override public List> getSettings() { return List.of( - SETTING_GRPC_PORTS, + SETTING_GRPC_PORT, SETTING_GRPC_HOST, SETTING_GRPC_PUBLISH_HOST, SETTING_GRPC_BIND_HOST, diff --git a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java 
b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
index 61c0722772b92..1fb6a0bca03ea 100644
--- a/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
+++ b/plugins/transport-grpc/src/main/java/org/opensearch/transport/grpc/Netty4GrpcServerTransport.java
@@ -63,9 +63,9 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
 /**
 * Port range on which to bind.
- * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORTS where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
+ * Note this setting is configured through AffixSetting AUX_TRANSPORT_PORT where the aux transport type matches the GRPC_TRANSPORT_SETTING_KEY.
 */
- public static final Setting SETTING_GRPC_PORTS = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(
+ public static final Setting SETTING_GRPC_PORT = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(
 GRPC_TRANSPORT_SETTING_KEY
 );
@@ -134,20 +134,21 @@ public class Netty4GrpcServerTransport extends NetworkPlugin.AuxTransport {
 * @param networkService the bind/publish addresses.
 */
 public Netty4GrpcServerTransport(Settings settings, List services, NetworkService networkService) {
+ logger.debug("Initializing Netty4GrpcServerTransport with settings = {}", settings);
 this.settings = Objects.requireNonNull(settings);
 this.services = Objects.requireNonNull(services);
 this.networkService = Objects.requireNonNull(networkService);
- final List httpBindHost = SETTING_GRPC_BIND_HOST.get(settings);
- this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : httpBindHost).toArray(
+ final List grpcBindHost = SETTING_GRPC_BIND_HOST.get(settings);
+ this.bindHosts = (grpcBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : grpcBindHost).toArray(
 Strings.EMPTY_ARRAY
 );
- final List httpPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
- this.publishHosts = (httpPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : httpPublishHost)
+ final List grpcPublishHost = SETTING_GRPC_PUBLISH_HOST.get(settings);
+ this.publishHosts = (grpcPublishHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings) : grpcPublishHost)
 .toArray(Strings.EMPTY_ARRAY);
- this.port = SETTING_GRPC_PORTS.get(settings);
+ this.port = SETTING_GRPC_PORT.get(settings);
 this.nettyEventLoopThreads = SETTING_GRPC_WORKER_COUNT.get(settings);
 }
@@ -229,7 +230,7 @@ private void bindServer() {
 + publishInetAddress
 + "). "
 + "Please specify a unique port by setting "
- + SETTING_GRPC_PORTS.getKey()
+ + SETTING_GRPC_PORT.getKey()
 + " or "
 + SETTING_GRPC_PUBLISH_PORT.getKey()
 );
diff --git a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
index ebeff62c2c23c..8cf44eebb293e 100644
--- a/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
+++ b/plugins/transport-grpc/src/test/java/org/opensearch/transport/grpc/Netty4GrpcServerTransportTests.java
@@ -44,6 +44,6 @@ public void test() {
 }
 private static Settings createSettings() {
- return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORTS.getKey(), getPortRange()).build();
+ return Settings.builder().put(Netty4GrpcServerTransport.SETTING_GRPC_PORT.getKey(), getPortRange()).build();
 }
 }
diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java
index 9f1dcbe8fb587..563a026109059 100644
--- a/server/src/main/java/org/opensearch/bootstrap/Security.java
+++ b/server/src/main/java/org/opensearch/bootstrap/Security.java
@@ -74,7 +74,7 @@
 import static org.opensearch.bootstrap.FilePermissionUtils.addDirectoryPath;
 import static org.opensearch.bootstrap.FilePermissionUtils.addSingleFilePath;
 import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_PORT_DEFAULTS;
-import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORTS;
+import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_PORT;
 import static org.opensearch.plugins.NetworkPlugin.AuxTransport.AUX_TRANSPORT_TYPES_SETTING;
 /**
@@ -423,7 +423,7 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S
 }
 /**
- * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORTS.
+ * Add dynamic {@link SocketPermission} based on AffixSetting AUX_TRANSPORT_PORT.
 * If an auxiliary transport type is enabled but has no corresponding port range setting fall back to AUX_PORT_DEFAULTS.
 *
 * @param policy the {@link Permissions} instance to apply the dynamic {@link SocketPermission}s to.
@@ -432,7 +432,7 @@ private static void addSocketPermissionForHttp(final Permissions policy, final S
 private static void addSocketPermissionForAux(final Permissions policy, final Settings settings) {
 Set portsRanges = new HashSet<>();
 for (String auxType : AUX_TRANSPORT_TYPES_SETTING.get(settings)) {
- Setting auxTypePortSettings = AUX_TRANSPORT_PORTS.getConcreteSettingForNamespace(auxType);
+ Setting auxTypePortSettings = AUX_TRANSPORT_PORT.getConcreteSettingForNamespace(auxType);
 if (auxTypePortSettings.exists(settings)) {
 portsRanges.add(auxTypePortSettings.get(settings));
 } else {
diff --git a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
index 516aa94534f94..4442189373c93 100644
--- a/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
+++ b/server/src/main/java/org/opensearch/plugins/NetworkPlugin.java
@@ -79,9 +79,9 @@ abstract class AuxTransport extends AbstractLifecycleComponent {
 public static final String AUX_SETTINGS_PREFIX = "aux.transport.";
 public static final String AUX_TRANSPORT_TYPES_KEY = AUX_SETTINGS_PREFIX + "types";
 public static final String AUX_PORT_DEFAULTS = "9400-9500";
- public static final Setting.AffixSetting AUX_TRANSPORT_PORTS = affixKeySetting(
+ public static final Setting.AffixSetting AUX_TRANSPORT_PORT = affixKeySetting(
 AUX_SETTINGS_PREFIX,
- "ports",
+ "port",
 key -> new Setting<>(key, AUX_PORT_DEFAULTS, PortsRange::new, Setting.Property.NodeScope)
 );

From fe1f0d814230413a3589204376d665e360da4b96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EC=A1=B0=ED=98=9C=EC=98=A8?= <68319395+hye-on@users.noreply.github.com>
Date: Fri, 17 Jan 2025 21:33:46 +0900
Subject: [PATCH 36/37] Fix getTime field name to time in GetStats (#16894) (#17009)

* Fix getTime field name to time in GetStats (#16894)

Signed-off-by: hye-on

* Update PR number in changelog

Signed-off-by: hye-on

* Deprecate getTime field and add time field in GetStats for backward compatibility

Signed-off-by: hye-on

* Add forRemoval flag to getTime field for future removal

Signed-off-by: hye-on

* Changed to use field instead of humanReadableField for GET_TIME in JSON response

Replaced the use of builder.humanReadableField for the GET_TIME field with builder.field(Fields.GET_TIME, Objects.toString(getTime())). This prevents the duplication of the time_in_millis field.

Signed-off-by: hye-on

* Add test to validate getTime and time fields in _stats API response

getTime and time fields are verified to be included in the _stats API response and correctly aligned.

Signed-off-by: hye-on

* Fix formatting in GetStats.java

Signed-off-by: hye-on

* Rename test file to better reflect test purpose

Signed-off-by: hye-on

* Test Add skip version for stats API human filter test under 2.19.99

Signed-off-by: hye-on

* Remove unnecessary changelog entries

Signed-off-by: hye-on

* Add a line for styling purposes

Signed-off-by: hye-on

---------

Signed-off-by: hye-on
---
 CHANGELOG.md | 2 ++
 ...include_both_time_and_gettime_in_stats.yml | 36 +++++++++++++++++++
 .../org/opensearch/index/get/GetStats.java | 9 ++++-
 3 files changed, 46 insertions(+), 1 deletion(-)
 create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 9d9b9e54c3640..c9d7d9a60a3e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Changes to support IP field in star tree indexing([#16641](https://github.com/opensearch-project/OpenSearch/pull/16641/))
 - Support object fields in star-tree index([#16728](https://github.com/opensearch-project/OpenSearch/pull/16728/))
 - Support searching from doc_value using termQueryCaseInsensitive/termQuery in flat_object/keyword field([#16974](https://github.com/opensearch-project/OpenSearch/pull/16974/))
+- Added a new `time` field to replace the deprecated `getTime` field in `GetStats`. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009))

 ### Dependencies
 - Bump `com.google.cloud:google-cloud-core-http` from 2.23.0 to 2.47.0 ([#16504](https://github.com/opensearch-project/OpenSearch/pull/16504))
@@ -75,6 +76,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Deprecated
 - Performing update operation with default pipeline or final pipeline is deprecated ([#16712](https://github.com/opensearch-project/OpenSearch/pull/16712))
+- Marked `getTime` field as deprecated in favor of the new `time` field. ([#17009](https://github.com/opensearch-project/OpenSearch/pull/17009))

 ### Removed

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
new file mode 100644
index 0000000000000..d5e3e7554b400
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.stats/60_include_both_time_and_gettime_in_stats.yml
@@ -0,0 +1,36 @@
+---
+setup:
+  - do:
+      indices.create:
+        index: test1
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 0
+        wait_for_active_shards: all
+
+  - do:
+      index:
+        index: test1
+        id: 1
+        body: { "foo": "bar" }
+
+  - do:
+      indices.refresh:
+        index: test1
+
+---
+"Test _stats API includes both time and getTime metrics with human filter":
+  - skip:
+      version: " - 2.19.99"
+      reason: "this change is added in 3.0.0"
+
+  - do:
+      indices.stats:
+        metric: [ get ]
+        human: true
+
+  - is_true: _all.primaries.get.time
+  - is_true: _all.primaries.get.getTime
+  - match: { _all.primaries.get.time: "0s" }
+  - match: { _all.primaries.get.getTime: "0s" }
diff --git a/server/src/main/java/org/opensearch/index/get/GetStats.java b/server/src/main/java/org/opensearch/index/get/GetStats.java
index a366014fe228e..55f14294d774b 100644
--- a/server/src/main/java/org/opensearch/index/get/GetStats.java
+++ b/server/src/main/java/org/opensearch/index/get/GetStats.java
@@ -41,6 +41,7 @@
 import org.opensearch.core.xcontent.XContentBuilder;

 import java.io.IOException;
+import java.util.Objects;

 /**
 * Stats for a search get
@@ -137,6 +138,7 @@ public long current() {
 public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
 builder.startObject(Fields.GET);
 builder.field(Fields.TOTAL, getCount());
+ builder.field(Fields.GET_TIME, Objects.toString(getTime()));
 builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, getTime());
 builder.field(Fields.EXISTS_TOTAL, existsCount);
 builder.humanReadableField(Fields.EXISTS_TIME_IN_MILLIS, Fields.EXISTS_TIME, getExistsTime());
@@ -155,7 +157,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
 static final class Fields {
 static final String GET = "get";
 static final String TOTAL = "total";
- static final String TIME = "getTime";
+ /**
+ * Deprecated field name for time. Use {@link #TIME} instead.
+ */
+ @Deprecated(forRemoval = true)
+ static final String GET_TIME = "getTime";
+ static final String TIME = "time";
 static final String TIME_IN_MILLIS = "time_in_millis";
 static final String EXISTS_TOTAL = "exists_total";
 static final String EXISTS_TIME = "exists_time";

From a72e95a810be51bbe883aba9507093deb2f14a12 Mon Sep 17 00:00:00 2001
From: Daniel Widdis
Date: Fri, 17 Jan 2025 15:54:55 -0800
Subject: [PATCH 37/37] Add Craig Perkins as OpenSearch Maintainer (#17046)

Signed-off-by: Daniel Widdis
---
 MAINTAINERS.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 4a8aa9305df74..93821a3da4c71 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -13,6 +13,7 @@ This document contains a list of maintainers in this repo. See [opensearch-proje
 | Ashish Singh | [ashking94](https://github.com/ashking94) | Amazon |
 | Bukhtawar Khan | [Bukhtawar](https://github.com/Bukhtawar) | Amazon |
 | Charlotte Henkle | [CEHENKLE](https://github.com/CEHENKLE) | Amazon |
+| Craig Perkins | [cwperks](https://github.com/cwperks) | Amazon |
 | Dan Widdis | [dbwiddis](https://github.com/dbwiddis) | Amazon |
 | Daniel "dB." Doubrovkine | [dblock](https://github.com/dblock) | Amazon |
 | Gao Binlong | [gaobinlong](https://github.com/gaobinlong) | Amazon |