diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 8c4f4d59ea1fc..af83f8b2edac2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -722,6 +722,12 @@ updates: - directory: /plugins/ open-pull-requests-limit: 1 package-ecosystem: gradle + ignore: + # For all packages, ignore all major versions to minimize breaking issues + - dependency-name: "com.google.cloud:google-cloud-storage" + update-types: [ "version-update:semver-major" ] + - dependency-name: "com.google.api-client:google-api-client" + update-types: [ "version-update:semver-major" ] schedule: interval: weekly labels: diff --git a/.github/workflows/delete_backport_branch.yml b/.github/workflows/delete_backport_branch.yml index 91626a0bc5092..1c73d3d250ce3 100644 --- a/.github/workflows/delete_backport_branch.yml +++ b/.github/workflows/delete_backport_branch.yml @@ -12,7 +12,7 @@ jobs: if: startsWith(github.event.pull_request.head.ref,'backport/') steps: - name: Delete merged branch - uses: actions/github-script@v5 + uses: actions/github-script@v7 with: script: | github.rest.git.deleteRef({ diff --git a/CHANGELOG.md b/CHANGELOG.md index 21fa7f6ded225..48628b476a1e0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,20 +11,32 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fallback to Remote cluster-state on Term-Version check mismatch - ([#15424](https://github.com/opensearch-project/OpenSearch/pull/15424)) - Implement WithFieldName interface in ValuesSourceAggregationBuilder & FieldSortBuilder ([#15916](https://github.com/opensearch-project/OpenSearch/pull/15916)) - Add successfulSearchShardIndices in searchRequestContext ([#15967](https://github.com/opensearch-project/OpenSearch/pull/15967)) -- Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) -- Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024)) +- Add support for msearch API to pass search pipeline name - ([#15923](https://github.com/opensearch-project/OpenSearch/pull/15923)) +- Add _list/indices API as paginated alternate to _cat/indices ([#14718](https://github.com/opensearch-project/OpenSearch/pull/14718)) +- Add success and failure metrics for async shard fetch ([#15976](https://github.com/opensearch-project/OpenSearch/pull/15976)) +- Add new metric REMOTE_STORE to NodeStats API response ([#15611](https://github.com/opensearch-project/OpenSearch/pull/15611)) ### Dependencies - Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578)) - Bump `protobuf` from 3.22.3 to 3.25.4 ([#15684](https://github.com/opensearch-project/OpenSearch/pull/15684)) -- Bump `org.apache.logging.log4j:log4j-core` from 2.23.1 to 2.24.0 ([#15858](https://github.com/opensearch-project/OpenSearch/pull/15858)) +- Bump `org.apache.logging.log4j:log4j-core` from 2.23.1 to 2.24.1 ([#15858](https://github.com/opensearch-project/OpenSearch/pull/15858), [#16134](https://github.com/opensearch-project/OpenSearch/pull/16134)) - Bump `peter-evans/create-pull-request` from 6 to 7 ([#15863](https://github.com/opensearch-project/OpenSearch/pull/15863)) - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.9.1 to 11.19.1 ([#15862](https://github.com/opensearch-project/OpenSearch/pull/15862)) - Bump `com.microsoft.azure:msal4j` from 1.17.0 to 1.17.1 ([#15945](https://github.com/opensearch-project/OpenSearch/pull/15945)) - Bump 
`ch.qos.logback:logback-core` from 1.5.6 to 1.5.8 ([#15946](https://github.com/opensearch-project/OpenSearch/pull/15946)) - Update protobuf from 3.25.4 to 3.25.5 ([#16011](https://github.com/opensearch-project/OpenSearch/pull/16011)) +- Bump `org.roaringbitmap:RoaringBitmap` from 1.2.1 to 1.3.0 ([#16040](https://github.com/opensearch-project/OpenSearch/pull/16040)) +- Bump `com.nimbusds:nimbus-jose-jwt` from 9.40 to 9.41.1 ([#16038](https://github.com/opensearch-project/OpenSearch/pull/16038)) +- Bump `actions/github-script` from 5 to 7 ([#16039](https://github.com/opensearch-project/OpenSearch/pull/16039)) +- Bump `dnsjava:dnsjava` from 3.6.1 to 3.6.2 ([#16041](https://github.com/opensearch-project/OpenSearch/pull/16041)) +- Bump `com.maxmind.geoip2:geoip2` from 4.2.0 to 4.2.1 ([#16042](https://github.com/opensearch-project/OpenSearch/pull/16042)) +- Bump `com.maxmind.db:maxmind-db` from 3.1.0 to 3.1.1 ([#16137](https://github.com/opensearch-project/OpenSearch/pull/16137)) +- Bump `com.azure:azure-core-http-netty` from 1.15.3 to 1.15.4 ([#16133](https://github.com/opensearch-project/OpenSearch/pull/16133)) ### Changed +- Add support for docker compose v2 in TestFixturesPlugin ([#16049](https://github.com/opensearch-project/OpenSearch/pull/16049)) +- Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) +- Remove Identity FeatureFlag ([#16024](https://github.com/opensearch-project/OpenSearch/pull/16024)) ### Deprecated @@ -35,7 +47,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix wildcard query containing escaped character ([#15737](https://github.com/opensearch-project/OpenSearch/pull/15737)) - Fix case-insensitive query on wildcard field ([#15882](https://github.com/opensearch-project/OpenSearch/pull/15882)) - Add validation for the search backpressure cancellation settings ([#15501](https://github.com/opensearch-project/OpenSearch/pull/15501)) +- Fix search_as_you_type not supporting multi-fields ([#15988](https://github.com/opensearch-project/OpenSearch/pull/15988)) +- Avoid infinite loop when `flat_object` field contains invalid token ([#15985](https://github.com/opensearch-project/OpenSearch/pull/15985)) - Fix infinite loop in nested agg ([#15931](https://github.com/opensearch-project/OpenSearch/pull/15931)) +- Fix race condition in node-join and node-left ([#15521](https://github.com/opensearch-project/OpenSearch/pull/15521)) ### Security diff --git a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java index fc78792bb3551..77d7997d6d48d 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/docker/DockerSupportService.java @@ -106,6 +106,7 @@ public DockerAvailability getDockerAvailability() { Version version = null; boolean isVersionHighEnough = false; boolean isComposeAvailable = false; + boolean isComposeV2Available = false; // Check if the Docker binary exists final Optional dockerBinary = getDockerPath(); @@ -129,6 +130,8 @@ public DockerAvailability getDockerAvailability() { if (lastResult.isSuccess() && composePath.isPresent()) { isComposeAvailable = runCommand(composePath.get(), "version").isSuccess(); } + + isComposeV2Available = runCommand(dockerPath, "compose", "version").isSuccess(); } } } @@ -138,6 +141,7 @@ public DockerAvailability 
getDockerAvailability() { this.dockerAvailability = new DockerAvailability( isAvailable, isComposeAvailable, + isComposeV2Available, isVersionHighEnough, dockerPath, version, @@ -356,6 +360,11 @@ public static class DockerAvailability { */ public final boolean isComposeAvailable; + /** + * True if docker compose is available. + */ + public final boolean isComposeV2Available; + /** * True if the installed Docker version is >= 17.05 */ @@ -379,6 +388,7 @@ public static class DockerAvailability { DockerAvailability( boolean isAvailable, boolean isComposeAvailable, + boolean isComposeV2Available, boolean isVersionHighEnough, String path, Version version, @@ -386,6 +396,7 @@ public static class DockerAvailability { ) { this.isAvailable = isAvailable; this.isComposeAvailable = isComposeAvailable; + this.isComposeV2Available = isComposeV2Available; this.isVersionHighEnough = isVersionHighEnough; this.path = path; this.version = version; diff --git a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java index e8772522b19a4..f65e231cd2e50 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/testfixtures/TestFixturesPlugin.java @@ -171,7 +171,11 @@ public void execute(Task task) { .findFirst(); composeExtension.getExecutable().set(dockerCompose.isPresent() ? dockerCompose.get() : "/usr/bin/docker"); - composeExtension.getUseDockerComposeV2().set(false); + if (dockerSupport.get().getDockerAvailability().isComposeV2Available) { + composeExtension.getUseDockerComposeV2().set(true); + } else if (dockerSupport.get().getDockerAvailability().isComposeAvailable) { + composeExtension.getUseDockerComposeV2().set(false); + } tasks.named("composeUp").configure(t -> { // Avoid running docker-compose tasks in parallel in CI due to some issues on certain Linux distributions @@ -228,7 +232,8 @@ private void maybeSkipTask(Provider dockerSupport, TaskPro private void maybeSkipTask(Provider dockerSupport, Task task) { task.onlyIf(spec -> { - boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeAvailable; + boolean isComposeAvailable = dockerSupport.get().getDockerAvailability().isComposeV2Available + || dockerSupport.get().getDockerAvailability().isComposeAvailable; if (isComposeAvailable == false) { LOGGER.info("Task {} requires docker-compose but it is unavailable. 
Task will be skipped.", task.getPath()); } diff --git a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle index 59063b3789c70..4d425964c77af 100644 --- a/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle +++ b/buildSrc/src/testKit/thirdPartyAudit/sample_jars/build.gradle @@ -17,7 +17,7 @@ repositories { } dependencies { - implementation "org.apache.logging.log4j:log4j-core:2.24.0" + implementation "org.apache.logging.log4j:log4j-core:2.24.1" } ["0.0.1", "0.0.2"].forEach { v -> diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index 5c6205ebf24d4..e312a2da77d94 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.10.2-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists -distributionSha256Sum=fdfca5dbc2834f0ece5020465737538e5ba679deeff5ab6c09621d67f8bb1a15 +distributionSha256Sum=2ab88d6de2c23e6adae7363ae6e29cbdd2a709e992929b48b6530fd0c7133bd6 diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index c0ff155ce1038..f74de1dc290dd 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,9 +39,9 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:4.2.0') + api('com.maxmind.geoip2:geoip2:4.2.1') // geoip2 dependencies: - api('com.maxmind.db:maxmind-db:3.1.0') + api('com.maxmind.db:maxmind-db:3.1.1') api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") diff --git a/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 deleted file mode 100644 index b6bfeeb9da60b..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-4.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78ff932dc13ac41dd1f0fd9e7405a7f4ad815ce0 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-4.2.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.2.1.jar.sha1 new file mode 100644 index 0000000000000..a9dc5483ac727 --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-4.2.1.jar.sha1 @@ -0,0 +1 @@ +9dbf8a8bea88a33e88c46eb3f503721b4bd08b90 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 deleted file mode 100644 index 9db7c7319af0b..0000000000000 --- a/modules/ingest-geoip/licenses/maxmind-db-3.1.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2008992ab45d61c7b28a18678b5df82272529da3 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/maxmind-db-3.1.1.jar.sha1 b/modules/ingest-geoip/licenses/maxmind-db-3.1.1.jar.sha1 new file mode 100644 index 0000000000000..f1a25c6d600c0 --- /dev/null +++ b/modules/ingest-geoip/licenses/maxmind-db-3.1.1.jar.sha1 @@ -0,0 +1 @@ +84a2a5d322fef1f1e82f05095683c8d53902baf1 \ No newline at end of file diff --git a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java index 9446ec1228532..ac6b87aefb3ec 100644 --- 
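The DockerSupportService and TestFixturesPlugin changes above detect Docker Compose v2 by running `docker compose version` against the Docker binary and only fall back to the legacy standalone `docker-compose` executable when v2 is absent. A rough standalone sketch of that probe order follows; it is illustrative only (the `ComposeProbe` class and `succeeds` helper are not part of this patch), not the Gradle plugin code itself.

```java
import java.io.IOException;

// Illustrative probe mirroring the detection order added above:
// prefer the "docker compose" (v2) subcommand, fall back to the
// standalone docker-compose binary, otherwise report compose as unavailable.
public class ComposeProbe {

    // Runs a command and reports whether it exited with status 0.
    static boolean succeeds(String... command) {
        try {
            Process process = new ProcessBuilder(command).redirectErrorStream(true).start();
            return process.waitFor() == 0;
        } catch (IOException e) {
            return false;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return false;
        }
    }

    public static void main(String[] args) {
        boolean composeV2Available = succeeds("docker", "compose", "version");
        boolean composeV1Available = succeeds("docker-compose", "version");
        if (composeV2Available) {
            System.out.println("use Docker Compose v2");
        } else if (composeV1Available) {
            System.out.println("use legacy docker-compose");
        } else {
            System.out.println("compose unavailable; compose-backed tasks would be skipped");
        }
    }
}
```

With both flags exposed on DockerAvailability, TestFixturesPlugin flips `useDockerComposeV2` accordingly, and `maybeSkipTask` treats either flavour as sufficient to run compose-backed fixtures.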
a/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java +++ b/modules/ingest-geoip/src/test/java/org/opensearch/ingest/geoip/IngestGeoIpModulePluginTests.java @@ -35,6 +35,7 @@ import com.maxmind.geoip2.model.AbstractResponse; import org.opensearch.common.network.InetAddresses; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.env.TestEnvironment; import org.opensearch.ingest.Processor; @@ -126,6 +127,12 @@ public void testAllowListNotSpecified() throws IOException { } } + public void testSettingsRegistration() { + final IngestGeoIpModulePlugin plugin = new IngestGeoIpModulePlugin(); + final List> settings = plugin.getSettings(); + assertTrue(settings.contains(IngestGeoIpModulePlugin.PROCESSORS_ALLOWLIST_SETTING)); + } + private void runAllowListTest(List allowList) throws IOException { Settings.Builder settingsBuilder = Settings.builder(); createDb(settingsBuilder); diff --git a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java index bac90d20b44e1..f5e2e34278880 100644 --- a/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java +++ b/modules/ingest-user-agent/src/main/java/org/opensearch/ingest/useragent/IngestUserAgentModulePlugin.java @@ -44,6 +44,7 @@ import java.nio.file.Path; import java.nio.file.PathMatcher; import java.nio.file.StandardOpenOption; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -152,6 +153,6 @@ static Map createUserAgentParsers(Path userAgentConfigD @Override public List> getSettings() { - return Collections.singletonList(CACHE_SIZE_SETTING); + return Arrays.asList(CACHE_SIZE_SETTING, PROCESSORS_ALLOWLIST_SETTING); } } diff --git a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java index 31fdafff1188a..563158026c37f 100644 --- a/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java +++ b/modules/ingest-user-agent/src/test/java/org/opensearch/ingest/useragent/IngestUserAgentModulePluginTests.java @@ -8,6 +8,7 @@ package org.opensearch.ingest.useragent; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.env.TestEnvironment; import org.opensearch.ingest.Processor; @@ -89,6 +90,12 @@ public void testAllowListNotSpecified() throws IOException { } } + public void testSettingsRegistration() { + final IngestUserAgentModulePlugin plugin = new IngestUserAgentModulePlugin(); + final List> settings = plugin.getSettings(); + assertTrue(settings.contains(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING)); + } + private void runAllowListTest(List allowList) throws IOException { final Settings settings = settingsBuilder.putList(IngestUserAgentModulePlugin.PROCESSORS_ALLOWLIST_SETTING.getKey(), allowList) .build(); diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 366e848416328..f08815ebbbd1e 100644 --- 
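The new `testSettingsRegistration` tests above, together with the `IngestUserAgentModulePlugin#getSettings` change, pin down a simple contract: every setting a plugin reads, including its processor allowlist, must be returned from `getSettings()`, otherwise nodes reject the key as unknown. A minimal sketch of that pattern follows; the plugin class and setting key are hypothetical, not taken from this patch.

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Setting.Property;
import org.opensearch.plugins.Plugin;

public class ExampleIngestPlugin extends Plugin {

    // Hypothetical allowlist setting; the real plugins expose PROCESSORS_ALLOWLIST_SETTING.
    static final Setting<List<String>> EXAMPLE_ALLOWLIST_SETTING = Setting.listSetting(
        "ingest.example.processors.allowed",
        Collections.emptyList(),
        Function.identity(),
        Property.NodeScope
    );

    @Override
    public List<Setting<?>> getSettings() {
        // Omitting the allowlist here is exactly the gap the new tests guard against:
        // a node configured with the key would refuse to start.
        return Arrays.asList(EXAMPLE_ALLOWLIST_SETTING);
    }
}
```

The new assertions simply check `plugin.getSettings().contains(...)`, which is cheap insurance against the setting being dropped in a future refactor.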
a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -264,7 +264,15 @@ public SearchAsYouTypeFieldMapper build(Mapper.BuilderContext context) { } ft.setPrefixField(prefixFieldType); ft.setShingleFields(shingleFieldTypes); - return new SearchAsYouTypeFieldMapper(name, ft, copyTo.build(), prefixFieldMapper, shingleFieldMappers, this); + return new SearchAsYouTypeFieldMapper( + name, + ft, + multiFieldsBuilder.build(this, context), + copyTo.build(), + prefixFieldMapper, + shingleFieldMappers, + this + ); } } @@ -623,12 +631,13 @@ public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRew public SearchAsYouTypeFieldMapper( String simpleName, SearchAsYouTypeFieldType mappedFieldType, + MultiFields multiFields, CopyTo copyTo, PrefixFieldMapper prefixField, ShingleFieldMapper[] shingleFields, Builder builder ) { - super(simpleName, mappedFieldType, MultiFields.empty(), copyTo); + super(simpleName, mappedFieldType, multiFields, copyTo); this.prefixField = prefixField; this.shingleFields = shingleFields; this.maxShingleSize = builder.maxShingleSize.getValue(); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index f55ad2e9d659c..7746cb714a019 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -298,6 +298,20 @@ private void assertMultiField(int shingleSize) throws IOException { } } + public void testSubField() throws IOException { + MapperService mapperService = createMapperService( + fieldMapping( + b -> b.field("type", "search_as_you_type") + .startObject("fields") + .startObject("subField") + .field("type", "keyword") + .endObject() + .endObject() + ) + ); + assertThat(mapperService.fieldType("field.subField"), instanceOf(KeywordFieldMapper.KeywordFieldType.class)); + } + public void testIndexOptions() throws IOException { DocumentMapper mapper = createDocumentMapper( fieldMapping(b -> b.field("type", "search_as_you_type").field("index_options", "offsets")) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 616da0be62c30..4baf79e619be9 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -48,7 +48,7 @@ dependencies { api 'com.azure:azure-json:1.1.0' api 'com.azure:azure-xml:1.1.0' api 'com.azure:azure-storage-common:12.25.1' - api 'com.azure:azure-core-http-netty:1.15.3' + api 'com.azure:azure-core-http-netty:1.15.4' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" @@ -63,7 +63,7 @@ dependencies { api "net.java.dev.jna:jna-platform:${versions.jna}" api 'com.microsoft.azure:msal4j:1.17.1' api 'com.nimbusds:oauth2-oidc-sdk:11.19.1' - api 'com.nimbusds:nimbus-jose-jwt:9.40' + api 'com.nimbusds:nimbus-jose-jwt:9.41.1' api 'com.nimbusds:content-type:2.3' api 'com.nimbusds:lang-tag:1.7' // Both msal4j:1.14.3 and oauth2-oidc-sdk:11.9.1 has compile dependency on different versions of json-smart, diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 
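The SearchAsYouTypeFieldMapper change above stops discarding multi-fields: the constructor previously passed `MultiFields.empty()` to its superclass, so any sub-field declared under a `search_as_you_type` field was never registered and `field.subField` could not be resolved. The sketch below builds the kind of mapping the fix makes usable, using `XContentBuilder`; the exact import packages can differ slightly between 2.x releases, so treat them as an assumption.

```java
import java.io.IOException;

import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.XContentBuilder;

public class SearchAsYouTypeMappingExample {
    public static void main(String[] args) throws IOException {
        // A search_as_you_type field with a keyword sub-field. Before the fix the
        // sub-field was silently dropped; the new testSubField test now asserts that
        // "field.subField" resolves to a KeywordFieldType.
        try (XContentBuilder mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("properties")
                    .startObject("text")
                        .field("type", "search_as_you_type")
                        .startObject("fields")
                            .startObject("subField")
                                .field("type", "keyword")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()) {
            System.out.println("built search_as_you_type mapping with a keyword sub-field");
        }
    }
}
```

The new REST suite `390_search_as_you_type.yml` later in this patch exercises the same shape end to end, querying both the root field and `text.subField`.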
b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 deleted file mode 100644 index 3cea52ba67ce5..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03b5bd5f5c16eea71f130119dbfb1fe5239f806a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 new file mode 100644 index 0000000000000..97e6fad264294 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.15.4.jar.sha1 @@ -0,0 +1 @@ +489a38c9e6efb5ce01fbd276d8cb6c0e89000459 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1 deleted file mode 100644 index 83228caf233cc..0000000000000 --- a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.40.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -42b1dfa0360e4062951b070bac52dd8d96fd7b38 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 new file mode 100644 index 0000000000000..71fa950cb9530 --- /dev/null +++ b/plugins/repository-azure/licenses/nimbus-jose-jwt-9.41.1.jar.sha1 @@ -0,0 +1 @@ +35532a88e1b49a623ec97fd276cc208ea525b6bc \ No newline at end of file diff --git a/qa/remote-clusters/docker-compose.yml b/qa/remote-clusters/docker-compose.yml index cf6aefcf5c1a3..2112da17efe6e 100644 --- a/qa/remote-clusters/docker-compose.yml +++ b/qa/remote-clusters/docker-compose.yml @@ -72,3 +72,11 @@ services: - "9600" volumes: - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro + depends_on: + - opensearch-2 + healthcheck: + test: ["CMD", "timeout", "1", "bash", "-c", "cat < /dev/null > /dev/tcp/localhost/9600"] + interval: 2s + timeout: 1s + retries: 5 + start_period: 15s diff --git a/release-notes/opensearch.release-notes-2.17.1.md b/release-notes/opensearch.release-notes-2.17.1.md new file mode 100644 index 0000000000000..2ff2bd44c3b1c --- /dev/null +++ b/release-notes/opensearch.release-notes-2.17.1.md @@ -0,0 +1,16 @@ +## 2024-10-01 Version 2.17.1 Release Notes + +## [2.17.1] +### Added +- Add path prefix support to hashed prefix snapshots ([#15664](https://github.com/opensearch-project/OpenSearch/pull/15664)) +- Memory optimisations in _cluster/health API ([#15492](https://github.com/opensearch-project/OpenSearch/pull/15492)) + +### Dependencies + +### Changed + +### Deprecated + +### Removed + +### Fixed diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml index ce78709f6ed5e..825bac9f91649 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml @@ -69,145 +69,3 @@ include_defaults: true - match: {defaults.node.attr.testattr: "test"} - ---- -"Test set search backpressure mode": - - - do: - cluster.put_settings: - body: - persistent: - search_backpressure.mode: "monitor_only" - - - match: {persistent: {search_backpressure: {mode: "monitor_only"}}} - ---- -"Test set invalid search backpressure mode": - - - skip: - version: "- 2.8.99" - reason: "Fixed in 2.9.0" - - - do: - catch: bad_request - cluster.put_settings: - body: - persistent: - 
search_backpressure.mode: "monitor-only" - - - match: {error.root_cause.0.type: "illegal_argument_exception"} - - match: { error.root_cause.0.reason: "Invalid SearchBackpressureMode: monitor-only" } - - match: { status: 400 } - - ---- -"Test setting search backpressure cancellation settings": - - skip: - version: "- 2.17.99" - reason: "Fixed in 2.18.0" - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_task.cancellation_burst: 11 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_task.cancellation_burst: "11"} - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_task.cancellation_rate: 0.1 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_task.cancellation_rate: "0.1"} - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_task.cancellation_ratio: 0.2 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_task.cancellation_ratio: "0.2"} - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_shard_task.cancellation_burst: 12 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_shard_task.cancellation_burst: "12"} - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_shard_task.cancellation_rate: 0.3 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_shard_task.cancellation_rate: "0.3"} - - - do: - cluster.put_settings: - body: - transient: - search_backpressure.search_shard_task.cancellation_ratio: 0.4 - - is_true: acknowledged - - - do: - cluster.get_settings: - flat_settings: false - - match: {transient.search_backpressure.search_shard_task.cancellation_ratio: "0.4"} - ---- -"Test setting invalid search backpressure cancellation_rate and cancellation_ratio": - - skip: - version: "- 2.17.99" - reason: "Fixed in 2.18.0" - - - do: - catch: /search_backpressure.search_task.cancellation_rate must be > 0/ - cluster.put_settings: - body: - transient: - search_backpressure.search_task.cancellation_rate: 0.0 - - - do: - catch: /search_backpressure.search_task.cancellation_ratio must be > 0/ - cluster.put_settings: - body: - transient: - search_backpressure.search_task.cancellation_ratio: 0.0 - - - do: - catch: /search_backpressure.search_shard_task.cancellation_rate must be > 0/ - cluster.put_settings: - body: - transient: - search_backpressure.search_shard_task.cancellation_rate: 0.0 - - - do: - catch: /search_backpressure.search_shard_task.cancellation_ratio must be > 0/ - cluster.put_settings: - body: - transient: - search_backpressure.search_shard_task.cancellation_ratio: 0.0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml index 83d3d273ebd93..e8da81d7bee41 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/90_flat_object.yml @@ -62,7 +62,6 @@ setup: }, "required_matches": 1 } - # Do index refresh - do: indices.refresh: @@ -74,7 +73,52 @@ teardown: - do: indices.delete: index: test - +--- +"Invalid docs": + - skip: + version: "- 
2.17.99" + reason: "parsing of these objects would infinite loop prior to 2.18" + # The following documents are invalid. + - do: + catch: /parsing_exception/ + index: + index: test + id: 3 + body: { + "ISBN13": "V12154942123242", + "catalog": [ "Arrays in Action" ], + "required_matches": 1 + } + - do: + catch: /parsing_exception/ + index: + index: test + id: 3 + body: { + "ISBN13": "V12154942123242", + "catalog": "Strings in Action", + "required_matches": 1 + } + - do: + catch: /parsing_exception/ + index: + index: test + id: 3 + body: { + "ISBN13": "V12154942123242", + "catalog": 12345, + "required_matches": 1 + } + - do: + catch: /parsing_exception/ + index: + index: test + id: 3 + body: { + "ISBN13": "V12154942123242", + "catalog": [ 12345 ], + "required_matches": 1 + } --- # Verify that mappings under the catalog field did not expand # and no dynamic fields were created. diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml index 8c8a98b2db22c..0cabbf6f25133 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/360_date_histogram.yml @@ -109,6 +109,19 @@ setup: --- "Date histogram aggregation w/ shared field range test": + - do: + indices.create: + index: dhisto-agg-w-query + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + refresh_interval: -1 + mappings: + properties: + date: + type: date + - do: bulk: refresh: true @@ -127,6 +140,11 @@ setup: - '{"index": {}}' - '{"date": "2025-02-14"}' + - do: + indices.forcemerge: + index: dhisto-agg-w-query + max_num_segments: 1 + - do: search: index: dhisto-agg-w-query diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml index 1e1d2b0706d6b..93ca668f93a6f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/40_range.yml @@ -535,8 +535,8 @@ setup: --- "Double range profiler shows filter rewrite info": - skip: - version: " - 2.99.99" - reason: debug info for filter rewrite added in 3.0.0 (to be backported to 2.15.0) + version: " - 2.15.99" + reason: debug info for filter rewrite was added in 2.16.0 - do: indices.create: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.backpressure/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.backpressure/10_basic.yml new file mode 100644 index 0000000000000..2862dcce072c4 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.backpressure/10_basic.yml @@ -0,0 +1,340 @@ +--- +"Test set search backpressure mode": + - skip: + version: "- 2.3.99" + reason: "Introduced in 2.4.0" + - do: + cluster.put_settings: + body: + persistent: + search_backpressure.mode: "monitor_only" + + - match: {persistent: {search_backpressure: {mode: "monitor_only"}}} + +--- +"Test set invalid search backpressure mode": + - skip: + version: "- 2.7.99" + reason: "Fixed in 2.8.0" + + - do: + catch: bad_request + cluster.put_settings: + body: + persistent: + search_backpressure.mode: "monitor-only" + + - match: {error.root_cause.0.type: "illegal_argument_exception"} + - match: { error.root_cause.0.reason: "Invalid 
SearchBackpressureMode: monitor-only" } + - match: { status: 400 } + +--- +"Test setting search backpressure cancellation settings": + - skip: + version: "- 2.17.99" + reason: "Fixed in 2.18.0" + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cancellation_burst: 11 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.cancellation_burst: "11"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cancellation_rate: 0.1 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.cancellation_rate: "0.1"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cancellation_ratio: 0.2 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.cancellation_ratio: "0.2"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cancellation_burst: 12 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.cancellation_burst: "12"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cancellation_rate: 0.3 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.cancellation_rate: "0.3"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cancellation_ratio: 0.4 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.cancellation_ratio: "0.4"} + +--- +"Test setting invalid search backpressure cancellation_rate and cancellation_ratio": + - skip: + version: "- 2.17.99" + reason: "Fixed in 2.18.0" + + - do: + catch: /search_backpressure.search_task.cancellation_rate must be > 0/ + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cancellation_rate: 0.0 + + - do: + catch: /search_backpressure.search_task.cancellation_ratio must be > 0/ + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cancellation_ratio: 0.0 + + - do: + catch: /search_backpressure.search_shard_task.cancellation_rate must be > 0/ + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cancellation_rate: 0.0 + + - do: + catch: /search_backpressure.search_shard_task.cancellation_ratio must be > 0/ + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cancellation_ratio: 0.0 + +--- +"Test setting search backpressure node settings": + - skip: + version: "- 2.3.99" + reason: "Introduced in 2.4.0" + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.node_duress.num_successive_breaches: 10 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.node_duress.num_successive_breaches: "10"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.node_duress.cpu_threshold: 0.85 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.node_duress.cpu_threshold: "0.85"} + + - do: + cluster.put_settings: + body: + transient: + 
search_backpressure.node_duress.heap_threshold: 0.75 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.node_duress.heap_threshold: "0.75"} + + +--- +"Test setting search backpressure search_task settings": + - skip: + version: "- 2.3.99" + reason: "Introduced in 2.4.0" + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.elapsed_time_millis_threshold: 50000 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.elapsed_time_millis_threshold: "50000"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.heap_percent_threshold: 0.01 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.heap_percent_threshold: "0.01"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.total_heap_percent_threshold: 0.08 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.total_heap_percent_threshold: "0.08"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.heap_variance: 3.0 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.heap_variance: "3.0"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.heap_moving_average_window_size: 50 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.heap_moving_average_window_size: "50"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_task.cpu_time_millis_threshold: 40000 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_task.cpu_time_millis_threshold: "40000"} + +--- +"Test setting search backpressure search_shard_task settings": + - skip: + version: "- 2.3.99" + reason: "Introduced in 2.4.0" + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.elapsed_time_millis_threshold: 50000 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.elapsed_time_millis_threshold: "50000"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.heap_percent_threshold: 0.01 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.heap_percent_threshold: "0.01"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.total_heap_percent_threshold: 0.08 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.total_heap_percent_threshold: "0.08"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.heap_variance: 3.0 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.heap_variance: "3.0"} + + - do: + cluster.put_settings: + body: + transient: + 
search_backpressure.search_shard_task.heap_moving_average_window_size: 50 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.heap_moving_average_window_size: "50"} + + - do: + cluster.put_settings: + body: + transient: + search_backpressure.search_shard_task.cpu_time_millis_threshold: 40000 + - is_true: acknowledged + + - do: + cluster.get_settings: + flat_settings: false + - match: {transient.search_backpressure.search_shard_task.cpu_time_millis_threshold: "40000"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/380_bitmap_filtering.yml similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml rename to rest-api-spec/src/main/resources/rest-api-spec/test/search/380_bitmap_filtering.yml diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/390_search_as_you_type.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/390_search_as_you_type.yml new file mode 100644 index 0000000000000..f29fa3f9aa987 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/390_search_as_you_type.yml @@ -0,0 +1,75 @@ +setup: + - do: + indices.create: + index: test_1 + body: + mappings: + properties: + text: + type: search_as_you_type + fields: + subField: + type: keyword + - do: + index: + index: test_1 + id: 1 + body: { text: test search as you type } + + - do: + indices.refresh: + index: [test_1] + +--- +teardown: + - do: + indices.delete: + index: test_1 + +# related issue: https://github.com/opensearch-project/OpenSearch/issues/5035 +--- +"Test search_as_you_type data type supports multi-fields": + - skip: + version: " - 2.17.99" + reason: "the bug was fixed since 2.18.0" + + - do: + indices.get_mapping: { + index: test_1 + } + + - match: {test_1.mappings.properties.text.type: search_as_you_type} + - match: {test_1.mappings.properties.text.fields.subField.type: keyword} + + - do: + search: + index: test_1 + body: + query: + multi_match: + query: "test search" + type: "bool_prefix" + + - match: {hits.total.value: 1} + + - do: + search: + index: test_1 + body: + query: + multi_match: + query: "test search" + type: "bool_prefix" + fields: ["text.subField"] + + - match: {hits.total.value: 1} + + - do: + search: + index: test_1 + body: + query: + term: + text.subField: "test search as you type" + + - match: {hits.total.value: 1} diff --git a/server/build.gradle b/server/build.gradle index 0cc42ad690eab..83a04ef12d13b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -127,7 +127,7 @@ dependencies { api "jakarta.annotation:jakarta.annotation-api:${versions.jakarta_annotation}" // https://mvnrepository.com/artifact/org.roaringbitmap/RoaringBitmap - implementation 'org.roaringbitmap:RoaringBitmap:1.2.1' + implementation 'org.roaringbitmap:RoaringBitmap:1.3.0' testImplementation(project(":test:framework")) { // tests use the locally compiled version of server diff --git a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 b/server/licenses/RoaringBitmap-1.2.1.jar.sha1 deleted file mode 100644 index ef8cd48c7a388..0000000000000 --- a/server/licenses/RoaringBitmap-1.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -828eb489b5e8c8762f2471010e9c7f20c7de596d \ No newline at end of file diff --git a/server/licenses/RoaringBitmap-1.3.0.jar.sha1 b/server/licenses/RoaringBitmap-1.3.0.jar.sha1 new file mode 100644 index 
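The new `search.backpressure/10_basic.yml` suite above drives the cancellation and node-duress settings through the REST layer. For completeness, here is a sketch of applying the same transient settings from a Java integration test, using the `ClusterUpdateSettingsRequest`/`assertAcked` pattern that appears later in this patch; the test class and method names are hypothetical.

```java
import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.OpenSearchIntegTestCase;

import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;

// Hypothetical integration test, not part of this patch.
public class SearchBackpressureSettingsIT extends OpenSearchIntegTestCase {

    public void testApplyCancellationSettings() {
        // Same keys the new REST tests set via cluster.put_settings.
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        request.transientSettings(
            Settings.builder()
                .put("search_backpressure.search_task.cancellation_burst", 11)
                .put("search_backpressure.search_task.cancellation_rate", 0.1)
                .put("search_backpressure.search_shard_task.cancellation_ratio", 0.4)
        );
        assertAcked(client().admin().cluster().updateSettings(request).actionGet());
    }
}
```

As the removed block in `cluster.put_settings/10_basic.yml` shows, these cases are not new; the patch relocates them into the dedicated backpressure suite and extends coverage to the node-duress, search_task, and search_shard_task thresholds.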
0000000000000..2e48289c4f7af --- /dev/null +++ b/server/licenses/RoaringBitmap-1.3.0.jar.sha1 @@ -0,0 +1 @@ +a46ce7a2dc494da69700ab421f081b1583857f6d \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/NodeJoinLeftIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/NodeJoinLeftIT.java new file mode 100644 index 0000000000000..014e2bf642a4d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/NodeJoinLeftIT.java @@ -0,0 +1,355 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.cluster.coordination; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.cluster.NodeConnectionsService; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.MockEngineFactoryPlugin; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.plugins.Plugin; +import org.opensearch.tasks.Task; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import org.opensearch.test.OpenSearchIntegTestCase.Scope; +import org.opensearch.test.TestLogsAppender; +import org.opensearch.test.store.MockFSIndexStore; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.test.transport.StubbableTransport; +import org.opensearch.transport.ClusterConnectionManager; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportService; +import org.junit.After; +import org.junit.Before; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME; +import static org.hamcrest.Matchers.is; + +/** + Check 
https://github.com/opensearch-project/OpenSearch/issues/4874 and + https://github.com/opensearch-project/OpenSearch/pull/15521 for context + */ +@ClusterScope(scope = Scope.TEST, numDataNodes = 0) +public class NodeJoinLeftIT extends OpenSearchIntegTestCase { + + private TestLogsAppender testLogsAppender; + private String clusterManager; + private String redNodeName; + private LoggerContext loggerContext; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList( + MockTransportService.TestPlugin.class, + MockFSIndexStore.TestPlugin.class, + InternalSettingsPlugin.class, + MockEngineFactoryPlugin.class + ); + } + + @Override + protected void beforeIndexDeletion() throws Exception { + super.beforeIndexDeletion(); + internalCluster().assertConsistentHistoryBetweenTranslogAndLuceneIndex(); + internalCluster().assertSeqNos(); + internalCluster().assertSameDocIdsOnShards(); + } + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + // Add any other specific messages you want to capture + List messagesToCapture = Arrays.asList("failed to join", "IllegalStateException"); + testLogsAppender = new TestLogsAppender(messagesToCapture); + loggerContext = (LoggerContext) LogManager.getContext(false); + Configuration config = loggerContext.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(ClusterConnectionManager.class.getName()); + loggerConfig.addAppender(testLogsAppender, null, null); + loggerContext.updateLoggers(); + + String indexName = "test"; + final Settings nodeSettings = Settings.builder() + .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms") + .put(NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), "10s") + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "200ms") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "100ms") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), "100ms") + .build(); + // start a 3 node cluster with 1 cluster-manager + this.clusterManager = internalCluster().startNode(nodeSettings); + internalCluster().startNode(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build()); + this.redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); + + // validate the 3 node cluster is up + ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get(); + assertThat(response.isTimedOut(), is(false)); + + // create an index + client().admin() + .indices() + .prepareCreate(indexName) + .setSettings( + Settings.builder() + .put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .get(); + } + + @After + public void tearDown() throws Exception { + testLogsAppender.clearCapturedLogs(); + loggerContext = (LoggerContext) LogManager.getContext(false); + Configuration config = loggerContext.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(ClusterConnectionManager.class.getName()); + loggerConfig.removeAppender(testLogsAppender.getName()); + loggerContext.updateLoggers(); + super.tearDown(); + } + + public void testClusterStabilityWhenJoinRequestHappensDuringNodeLeftTask() throws Exception { + + ClusterService clusterManagerClsService = 
internalCluster().getInstance(ClusterService.class, clusterManager); + // Simulate a slow applier on the cm to delay node-left state application + clusterManagerClsService.addStateApplier(event -> { + if (event.nodesRemoved()) { + try { + Thread.sleep(3000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + }); + // Toggle to succeed/fail the followerchecker to simulate the initial node leaving. + AtomicBoolean succeedFollowerChecker = new AtomicBoolean(); + + // Simulate followerchecker failure on 1 node when succeedFollowerChecker is false + FollowerCheckerBehaviour simulatedFailureBehaviour = new FollowerCheckerBehaviour(() -> { + if (succeedFollowerChecker.get()) { + return; + } + try { + Thread.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new NodeHealthCheckFailureException("fake followerchecker failure simulated by test to repro race condition"); + }); + MockTransportService redTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + redNodeName + ); + redTransportService.addRequestHandlingBehavior(FOLLOWER_CHECK_ACTION_NAME, simulatedFailureBehaviour); + + // Loop runs 5 times to ensure race condition gets reproduced + testLogsAppender.clearCapturedLogs(); + for (int i = 0; i < 5; i++) { + logger.info("--> simulating followerchecker failure to trigger node-left"); + succeedFollowerChecker.set(false); + ClusterHealthResponse response1 = client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); + assertThat(response1.isTimedOut(), is(false)); + + // once we know a node has left, we can re-enable followerchecker to work normally and validate node rejoins + logger.info("--> re-enabling normal followerchecker and validating cluster is stable"); + succeedFollowerChecker.set(true); + ClusterHealthResponse response2 = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response2.isTimedOut(), is(false)); + + Thread.sleep(1000); + // checking again to validate stability and ensure node did not leave + ClusterHealthResponse response3 = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response3.isTimedOut(), is(false)); + } + + succeedFollowerChecker.set(true); + ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response.isTimedOut(), is(false)); + + // assert that join requests fail with the right exception + boolean logFound = testLogsAppender.waitForLog("failed to join", 30, TimeUnit.SECONDS) + && testLogsAppender.waitForLog( + "IllegalStateException[cannot make a new connection as disconnect to node", + 30, + TimeUnit.SECONDS + ); + assertTrue("Expected log was not found within the timeout period", logFound); + } + + public void testClusterStabilityWhenDisconnectDuringSlowNodeLeftTask() throws Exception { + ClusterService clusterManagerClsService = internalCluster().getInstance(ClusterService.class, clusterManager); + // Simulate a slow applier on the cm to delay node-left state application + clusterManagerClsService.addStateApplier(event -> { + if (event.nodesRemoved()) { + try { + Thread.sleep(3000); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + }); + // Toggle to succeed/fail the followerchecker to simulate the initial node leaving. 
+ AtomicBoolean succeedFollowerChecker = new AtomicBoolean(); + + // Simulate followerchecker failure on 1 node when succeedFollowerChecker is false + FollowerCheckerBehaviour simulatedFailureBehaviour = new FollowerCheckerBehaviour(() -> { + if (succeedFollowerChecker.get()) { + return; + } + try { + Thread.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new NodeHealthCheckFailureException("fake followerchecker failure simulated by test to repro race condition"); + }); + MockTransportService cmTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + clusterManager + ); + MockTransportService redTransportService = (MockTransportService) internalCluster().getInstance( + TransportService.class, + redNodeName + ); + redTransportService.addRequestHandlingBehavior(FOLLOWER_CHECK_ACTION_NAME, simulatedFailureBehaviour); + + // Loop runs 5 times to ensure race condition gets reproduced + testLogsAppender.clearCapturedLogs(); + for (int i = 0; i < 5; i++) { + // Fail followerchecker by force to trigger node disconnect and node left + logger.info("--> simulating followerchecker failure to trigger node-left"); + succeedFollowerChecker.set(false); + Thread.sleep(1000); + + // Trigger a node disconnect while node-left task is still processing + logger.info( + "--> triggering a simulated disconnect on red node, after the follower checker failed to see how node-left task deals with this" + ); + cmTransportService.disconnectFromNode(redTransportService.getLocalDiscoNode()); + + ClusterHealthResponse response1 = client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); + assertThat(response1.isTimedOut(), is(false)); + + // once we know a node has left, we can re-enable followerchecker to work normally and validate node rejoins + logger.info("--> re-enabling normal followerchecker and validating cluster is stable"); + succeedFollowerChecker.set(true); + ClusterHealthResponse response2 = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response2.isTimedOut(), is(false)); + + Thread.sleep(1000); + // checking again to validate stability and ensure node did not leave + ClusterHealthResponse response3 = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response3.isTimedOut(), is(false)); + } + + succeedFollowerChecker.set(true); + ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response.isTimedOut(), is(false)); + + // assert that join requests fail with the right exception + boolean logFound = testLogsAppender.waitForLog("failed to join", 30, TimeUnit.SECONDS); + assertTrue("Expected log was not found within the timeout period", logFound); + logFound = testLogsAppender.waitForLog( + "IllegalStateException[cannot make a new connection as disconnect to node", + 30, + TimeUnit.SECONDS + ); + assertTrue("Expected log was not found within the timeout period", logFound); + } + + public void testRestartDataNode() throws Exception { + + Settings redNodeDataPathSettings = internalCluster().dataPathSettings(redNodeName); + logger.info("-> stopping data node"); + internalCluster().stopRandomNode(settings -> settings.get("node.name").equals(redNodeName)); + ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); + assertThat(response.isTimedOut(), is(false)); + + logger.info("-> restarting stopped node"); + 
internalCluster().startNode(Settings.builder().put("node.name", redNodeName).put(redNodeDataPathSettings).build()); + response = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response.isTimedOut(), is(false)); + } + + public void testRestartCmNode() throws Exception { + + Settings cmNodeSettings = internalCluster().dataPathSettings(clusterManager); + + logger.info("-> stopping cluster-manager node"); + internalCluster().stopRandomNode(settings -> settings.get("node.name").equals(clusterManager)); + ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); + assertThat(response.isTimedOut(), is(false)); + + logger.info("-> restarting stopped node"); + internalCluster().startNode(Settings.builder().put("node.name", clusterManager).put(cmNodeSettings).build()); + response = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); + assertThat(response.isTimedOut(), is(false)); + } + + private class FollowerCheckerBehaviour implements StubbableTransport.RequestHandlingBehavior { + private final Runnable connectionBreaker; + + private FollowerCheckerBehaviour(Runnable connectionBreaker) { + this.connectionBreaker = connectionBreaker; + } + + @Override + public void messageReceived( + TransportRequestHandler handler, + TransportRequest request, + TransportChannel channel, + Task task + ) throws Exception { + + connectionBreaker.run(); + handler.messageReceived(request, channel, task); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java new file mode 100644 index 0000000000000..1d19a4bfd1af5 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemotePublicationConfigurationIT.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.remotemigration.MigrationBaseTestCase; +import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.junit.Before; + +import java.util.Collection; +import java.util.Locale; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.MIGRATION_DIRECTION_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Tests the compatibility between types of nodes based on the configured repositories + * Non Remote node [No Repositories configured] + * Remote Publish Configured Node [Cluster State + Routing Table] + * Remote Node [Cluster State + Segment + Translog] + * Remote Node With Routing Table [Cluster State + Segment + Translog + Routing Table] + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemotePublicationConfigurationIT extends MigrationBaseTestCase { + private final String REMOTE_PRI_DOCREP_REP = "remote-primary-docrep-replica"; + + @Override + protected Collection> nodePlugins() { + /* Adding the following mock plugins: + - InternalSettingsPlugin : To override default intervals of retention lease and global ckp sync + - MockFsRepositoryPlugin and MockTransportService.TestPlugin: To ensure remote interactions are not no-op and retention leases are properly propagated + */ + return Stream.concat( + super.nodePlugins().stream(), + Stream.of(InternalSettingsPlugin.class, MockFsRepositoryPlugin.class, MockTransportService.TestPlugin.class) + ).collect(Collectors.toList()); + } + + @Before + public void setUp() throws Exception { + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + super.setUp(); + } + + public Settings.Builder remotePublishConfiguredNodeSetting() { + String stateRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + String prefixModeVerificationSuffix = BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(); + String stateRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String routingTableRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + ROUTING_TABLE_REPO_NAME + ); + String routingTableRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + ROUTING_TABLE_REPO_NAME + ); + + Settings.Builder builder = Settings.builder() + .put("node.attr." + REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(stateRepoTypeAttributeKey, ReloadableFsRepository.TYPE) + .put(stateRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put(stateRepoSettingsAttributeKeyPrefix + prefixModeVerificationSuffix, true) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, ROUTING_TABLE_REPO_NAME) + .put(routingTableRepoTypeAttributeKey, ReloadableFsRepository.TYPE) + .put(routingTableRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath); + return builder; + } + + public Settings.Builder remoteWithRoutingTableNodeSetting() { + // Remote Cluster with Routing table + return Settings.builder() + .put( + buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + REPOSITORY_2_NAME, + translogRepoPath, + REPOSITORY_NAME, + segmentRepoPath, + false + ) + ) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true); + } + + public void testRemotePublishConfigNodeJoinNonRemoteCluster() throws Exception { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ensureStableCluster(6); + ensureGreen(); + } + + public void testRemotePublishConfigNodeJoinRemoteCluster() throws Exception { + // Remote Cluster without Routing table + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + setAddRemote(false); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeWithRoutingTableJoinRemoteCluster() throws Exception { + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNodes(2); + setAddRemote(false); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); 
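+ // Switch the cluster to mixed compatibility mode with a remote_store migration direction before adding the remote-publish-configured cluster manager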
+ assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + // Remote Repo with Routing table + Settings settings = remoteWithRoutingTableNodeSetting().build(); + internalCluster().startClusterManagerOnlyNode(settings); + ensureStableCluster(4); + ensureGreen(); + } + + public void testNonRemoteNodeJoinRemoteWithRoutingCluster() throws Exception { + Settings settings = remoteWithRoutingTableNodeSetting().build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNodes(2, settings); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + internalCluster().startClusterManagerOnlyNode(); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemotePublishConfigNodeJoinRemoteWithRoutingCluster() throws Exception { + Settings settings = remoteWithRoutingTableNodeSetting().build(); + internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNodes(2, settings); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + internalCluster().startClusterManagerOnlyNode(remotePublishConfiguredNodeSetting().build()); + + ensureStableCluster(4); + ensureGreen(); + } + + public void testNonRemoteNodeJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + internalCluster().startClusterManagerOnlyNode(); + + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), "mixed") + ); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + + setAddRemote(true); + internalCluster().startClusterManagerOnlyNode(); + ensureStableCluster(4); + ensureGreen(); + } + + public void testRemoteNodeWithRoutingTableJoiningPublishConfigCluster() throws Exception { + Settings.Builder build = remotePublishConfiguredNodeSetting(); + internalCluster().startClusterManagerOnlyNode(build.build()); + internalCluster().startDataOnlyNodes(2, build.build()); + + ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest(); + updateSettingsRequest.persistentSettings( + Settings.builder() + .put(MIGRATION_DIRECTION_SETTING.getKey(), "remote_store") + .put(REMOTE_STORE_COMPATIBILITY_MODE_SETTING.getKey(), 
"mixed") + ); + + Settings settings = Settings.builder() + .put( + buildRemoteStoreNodeAttributes( + REPOSITORY_NAME, + segmentRepoPath, + REPOSITORY_2_NAME, + translogRepoPath, + ROUTING_TABLE_REPO_NAME, + segmentRepoPath, + false + ) + ) + .put(REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) + .build(); + assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet()); + internalCluster().startClusterManagerOnlyNode(settings); + + ensureStableCluster(4); + ensureGreen(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java index ffb9352e8ba47..578c922c80a0d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/remote/RemoteStatePublicationIT.java @@ -235,7 +235,6 @@ public void testRemotePublicationDownloadStats() { assertDataNodeDownloadStats(nodesStatsResponseDataNode); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15767") public void testRemotePublicationDisabledByRollingRestart() throws Exception { prepareCluster(3, 2, INDEX_NAME, 1, 2); ensureStableCluster(5); @@ -272,7 +271,6 @@ public void doAfterNodes(int n, Client client) { assertTrue( stats.getFullClusterStateReceivedCount() > 0 || stats.getCompatibleClusterStateDiffReceivedCount() > 0 ); - assertEquals(0, stats.getIncompatibleClusterStateDiffReceivedCount()); } else { DiscoveryStats stats = nodeStats.getDiscoveryStats(); assertEquals(0, stats.getPublishStats().getFullClusterStateReceivedCount()); @@ -297,7 +295,7 @@ public void doAfterNodes(int n, Client client) { ); if (activeCMRestarted) { assertNull(remoteState.getLastAcceptedState()); - // assertNull(remoteState.getLastAcceptedManifest()); + assertNull(remoteState.getLastAcceptedManifest()); } else { ClusterState localState = registry.getPersistedState(PersistedStateRegistry.PersistedStateType.LOCAL) .getLastAcceptedState(); @@ -326,7 +324,6 @@ public void doAfterNodes(int n, Client client) { response.getNodes().forEach(nodeStats -> { PublishClusterStateStats stats = nodeStats.getDiscoveryStats().getPublishStats(); assertTrue(stats.getFullClusterStateReceivedCount() > 0 || stats.getCompatibleClusterStateDiffReceivedCount() > 0); - assertEquals(0, stats.getIncompatibleClusterStateDiffReceivedCount()); }); NodesInfoResponse nodesInfoResponse = client().admin() .cluster() @@ -341,7 +338,7 @@ public void doAfterNodes(int n, Client client) { PersistedStateRegistry registry = internalCluster().getInstance(PersistedStateRegistry.class, node); CoordinationState.PersistedState remoteState = registry.getPersistedState(PersistedStateRegistry.PersistedStateType.REMOTE); assertNull(remoteState.getLastAcceptedState()); - // assertNull(remoteState.getLastAcceptedManifest()); + assertNull(remoteState.getLastAcceptedManifest()); }); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index 17a9c3ddbe317..a82e6d45ce0f6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -57,6 +57,8 @@ public class MigrationBaseTestCase extends 
OpenSearchIntegTestCase { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String ROUTING_TABLE_REPO_NAME = "remote-routing-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; protected Path segmentRepoPath; @@ -72,7 +74,7 @@ public class MigrationBaseTestCase extends OpenSearchIntegTestCase { randomAlphaOfLength(5) ); - void setAddRemote(boolean addRemote) { + public void setAddRemote(boolean addRemote) { this.addRemote = addRemote; } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java index b55219e1cb37f..35689deeb8134 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteMigrationIndexMetadataUpdateIT.java @@ -19,7 +19,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.core.util.FileSystemUtils; import org.opensearch.index.remote.RemoteIndexPath; -import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.InternalTestCluster; @@ -533,17 +532,18 @@ public void testRemoteIndexPathFileExistsAfterMigration() throws Exception { // validate remote index path file exists logger.info("---> Asserting remote index path file exists"); - String fileNamePrefix = String.join(RemoteIndexPathUploader.DELIMITER, indexUUID, "7", RemoteIndexPath.DEFAULT_VERSION); assertTrue(FileSystemUtils.exists(translogRepoPath.resolve(RemoteIndexPath.DIR))); Path[] files = FileSystemUtils.files(translogRepoPath.resolve(RemoteIndexPath.DIR)); assertEquals(1, files.length); - assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + logger.info(files[0].toString()); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(indexUUID))); assertTrue(FileSystemUtils.exists(segmentRepoPath.resolve(RemoteIndexPath.DIR))); files = FileSystemUtils.files(segmentRepoPath.resolve(RemoteIndexPath.DIR)); assertEquals(1, files.length); - assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(fileNamePrefix))); + logger.info(files[0].toString()); + assertTrue(Arrays.stream(files).anyMatch(file -> file.toString().contains(indexUUID))); } /** diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index 30c597e405f4e..d9e72dd137182 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -12,6 +12,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; import org.opensearch.common.unit.TimeValue; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; @@ -68,7 +69,6 @@ public void testNewIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMixedMode() 
assertRemoteStoreBackedIndex(indexName2); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15793") public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMixedMode() throws Exception { logger.info("Initialize cluster: gives non remote cluster manager"); initializeCluster(false); @@ -76,7 +76,10 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix logger.info("Add remote and non-remote nodes"); setClusterMode(MIXED.mode); addRemote = false; - String nonRemoteNodeName = internalCluster().startNode(); + Settings settings = Settings.builder() + .put(BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING.getKey(), snapshotShardPathFixedPrefix ? "c" : "") + .build(); + String nonRemoteNodeName = internalCluster().startNode(settings); addRemote = true; String remoteNodeName = internalCluster().startNode(); internalCluster().validateClusterFormed(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 0acb578e2e7bf..a0183e89bfce2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -11,6 +11,8 @@ import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -25,6 +27,7 @@ import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.index.Index; @@ -43,14 +46,11 @@ import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.FsRepository; -import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotRestoreException; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; -import org.junit.Before; import java.io.IOException; import java.nio.file.Files; @@ -63,6 +63,7 @@ import java.util.Objects; import java.util.Optional; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -79,48 +80,7 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { - private static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - private Path 
remoteRepoPath; - - @Before - public void setup() { - remoteRepoPath = randomRepoPath().toAbsolutePath(); - } - - @After - public void teardown() { - clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); - } - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) - .build(); - } - - private Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { - Settings.Builder settingsBuilder = Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); - return settingsBuilder; - } - - private void indexDocuments(Client client, String indexName, int numOfDocs) { - indexDocuments(client, indexName, 0, numOfDocs); - } - - private void indexDocuments(Client client, String indexName, int fromId, int toId) { - for (int i = fromId; i < toId; i++) { - String id = Integer.toString(i); - client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); - } - client.admin().indices().prepareFlush(indexName).get(); - } +public class RemoteRestoreSnapshotIT extends RemoteSnapshotIT { private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { for (int i = 0; i < numOfDocs; i++) { @@ -997,6 +957,75 @@ public void testConcurrentSnapshotV2CreateOperation() throws InterruptedExceptio assertThat(repositoryData.getSnapshotIds().size(), greaterThanOrEqualTo(1)); } + public void testConcurrentSnapshotV2CreateOperation_MasterChange() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + Thread thread = new Thread(() -> { + try { + String snapshotName = "snapshot-earlier-master"; + internalCluster().nonClusterManagerClient() + .admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName) + .setWaitForCompletion(true) + 
.setMasterNodeTimeout(TimeValue.timeValueSeconds(60)) + .get(); + + } catch (Exception ignored) {} + }); + thread.start(); + + // stop the current cluster-manager node + final String clusterManagerNode = internalCluster().getClusterManagerName(); + stopNode(clusterManagerNode); + + // Validate that at least one snapshot has been created + String snapshotName = "new-snapshot"; + try { + client().admin().cluster().prepareCreateSnapshot(snapshotRepoName, snapshotName).setWaitForCompletion(true).get(); + } catch (Exception e) { + logger.info("Exception while creating new-snapshot", e); + } + + // Validate that snapshot is present in repository data + assertBusy(() -> { + GetSnapshotsRequest request = new GetSnapshotsRequest(snapshotRepoName); + GetSnapshotsResponse response2 = client().admin().cluster().getSnapshots(request).actionGet(); + assertThat(response2.getSnapshots().size(), greaterThanOrEqualTo(1)); + }, 30, TimeUnit.SECONDS); + thread.join(); + } + public void testCreateSnapshotV2WithRedIndex() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); @@ -1315,11 +1344,4 @@ public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { createV1SnapshotThread.join(); } - private Settings pinnedTimestampSettings() { - Settings settings = Settings.builder() - .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) - .build(); - return settings; - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSnapshotIT.java new file mode 100644 index 0000000000000..836871b8251d1 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSnapshotIT.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.repositories.fs.ReloadableFsRepository; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.junit.After; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.concurrent.ExecutionException; + +import static org.opensearch.repositories.fs.ReloadableFsRepository.REPOSITORIES_FAILRATE_SETTING; + +public abstract class RemoteSnapshotIT extends AbstractSnapshotIntegTestCase { + protected static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + protected Path remoteRepoPath; + + @Before + public void setup() { + remoteRepoPath = randomRepoPath().toAbsolutePath(); + } + + @After + public void teardown() { + clusterAdmin().prepareCleanupRepository(BASE_REMOTE_REPO).get(); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) + .build(); + } + + protected Settings pinnedTimestampSettings() { + Settings settings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .build(); + return settings; + } + + protected Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); + return settingsBuilder; + } + + protected void indexDocuments(Client client, String indexName, int numOfDocs) { + indexDocuments(client, indexName, 0, numOfDocs); + } + + void indexDocuments(Client client, String indexName, int fromId, int toId) { + for (int i = fromId; i < toId; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + protected void setFailRate(String repoName, int value) throws ExecutionException, InterruptedException { + GetRepositoriesRequest gr = new GetRepositoriesRequest(new String[] { repoName }); + GetRepositoriesResponse res = client().admin().cluster().getRepositories(gr).get(); + RepositoryMetadata rmd = res.repositories().get(0); + Settings.Builder settings = Settings.builder() + .put("location", rmd.settings().get("location")) + .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); + createRepository(repoName, ReloadableFsRepository.TYPE, settings); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index d078ba05faa12..6a5adf5ea4fb7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -42,6 +42,7 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; +import java.io.IOError; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -339,10 +340,11 @@ public void testFullClusterRestoreManifestFilePointsToInvalidIndexMetadataPathTh for (UploadedIndexMetadata md : manifest.getIndices()) { Files.move(segmentRepoPath.resolve(md.getUploadedFilename()), segmentRepoPath.resolve("cluster-state/")); } + internalCluster().stopAllNodes(); } catch (IOException e) { throw new RuntimeException(e); } - assertThrows(IllegalStateException.class, () -> addNewNodes(dataNodeCount, clusterManagerNodeCount)); + assertThrows(IOError.class, () -> internalCluster().client()); // Test is complete // Starting a node without remote state to ensure test cleanup diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 692727357a88a..ebb911c739eb3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -18,6 +18,7 @@ import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.client.Requests; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; @@ -202,7 +203,7 @@ public void testRemoteTranslogCleanup() throws Exception { public void testStaleCommitDeletionWithInvokeFlush() throws Exception { String dataNode = internalCluster().startNode(); - createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); String segmentsPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.get(getNodeSettings()); @@ -1011,4 +1012,70 @@ public void testAsyncTranslogDurabilityRestrictionsThroughIdxTemplates() throws .get() ); } + + public void testCloseIndexWithNoOpSyncAndFlushForSyncTranslog() throws InterruptedException { + internalCluster().startNodes(3); + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "5s")) + .get(); + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(0, 10000L, -1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s"); + createIndex(INDEX_NAME, settings.build()); + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + if (randomBoolean()) { + for (int i = 0; i < randomIntBetween(1, 5); i++) { + indexSingleDoc(INDEX_NAME); + } + flushAndRefresh(INDEX_NAME); + } + // Index a single doc to start the async IO processor, which leads to a 10s wait before the next sync. + indexSingleDoc(INDEX_NAME); + // Count down the latch so the main thread can flush after a short sleep. + latch.countDown(); + // Index another doc; in this case the flush happens before the sync. 
+ indexSingleDoc(INDEX_NAME); + }).start(); + // Wait for at least one doc to be ingested. + latch.await(); + // Sleep for some time so the next doc lands in the Lucene buffer. If the flush happens before doc #2 + // gets indexed, we hit the happy case where the close index succeeds. + Thread.sleep(1000); + // Flush so that the subsequent sync or flushes are no-op. + flush(INDEX_NAME); + // Closing the index involves translog.sync and shard.flush which are now no-op. + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(10000); + ensureGreen(INDEX_NAME); + } + + public void testCloseIndexWithNoOpSyncAndFlushForAsyncTranslog() throws InterruptedException { + internalCluster().startNodes(3); + Settings.Builder settings = Settings.builder() + .put(remoteStoreIndexSettings(0, 10000L, -1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1s") + .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Durability.ASYNC) + .put(IndexSettings.INDEX_TRANSLOG_SYNC_INTERVAL_SETTING.getKey(), "10s"); + createIndex(INDEX_NAME, settings.build()); + CountDownLatch latch = new CountDownLatch(1); + new Thread(() -> { + // Index some docs to start the async IO processor, which leads to a 10s wait before the next sync. + indexSingleDoc(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + indexSingleDoc(INDEX_NAME); + // Count down the latch so the main thread can flush after a short sleep. + latch.countDown(); + }).start(); + // Wait for at least one doc to be ingested. + latch.await(); + // Flush so that the subsequent sync or flushes are no-op. + flush(INDEX_NAME); + // Closing the index involves translog.sync and shard.flush which are now no-op. + client().admin().indices().close(Requests.closeIndexRequest(INDEX_NAME)).actionGet(); + Thread.sleep(10000); + ensureGreen(INDEX_NAME); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java new file mode 100644 index 0000000000000..08ece7df457cc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsGarbageCollectionIT.java @@ -0,0 +1,508 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.action.ActionListener; +import org.opensearch.index.translog.transfer.TranslogTransferMetadata; +import org.opensearch.indices.RemoteStoreSettings; +import org.opensearch.node.remotestore.RemoteStorePinnedTimestampService; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; +import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.TRANSLOG; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.DATA; +import static org.opensearch.index.remote.RemoteStoreEnums.DataType.METADATA; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStorePinnedTimestampsGarbageCollectionIT extends RemoteStoreBaseIntegTestCase { + static final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false) + .build(); + } + + private void keepPinnedTimestampSchedulerUpdated() throws InterruptedException { + long currentTime = System.currentTimeMillis(); + int maxRetry = 10; + while (maxRetry > 0 && RemoteStorePinnedTimestampService.getPinnedTimestamps().v1() <= currentTime) { + Thread.sleep(1000); + maxRetry--; + } + } + + ActionListener noOpActionListener = new ActionListener<>() { + @Override + public void onResponse(Void unused) {} + + @Override + public void onFailure(Exception e) {} + }; + + public void testLiveIndexNoPinnedTimestamps() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 0) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = randomIntBetween(5, 10); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + 
).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(2, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }); + } + + public void testLiveIndexNoPinnedTimestampsWithExtraGenSettingWithinLimit() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 10) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = randomIntBetween(5, 9); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertTrue(metadataFiles.size() >= numDocs + 1); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }, 30, TimeUnit.SECONDS); + } + + public void testLiveIndexNoPinnedTimestampsWithExtraGenSetting() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 3) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = 5; + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + 
INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(5, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }); + } + + public void testLiveIndexWithPinnedTimestamps() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 0) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = randomIntBetween(5, 10); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + if (i == 2) { + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueMinutes(1)); + remoteStorePinnedTimestampService.pinTimestamp(System.currentTimeMillis(), "xyz", noOpActionListener); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + } + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(3, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }); + } + + public void testLiveIndexWithPinnedTimestampsMultiplePrimaryTerms() throws Exception { + prepareCluster(1, 2, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(1, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 3) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int 
numDocs = randomIntBetween(5, 10); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + if (i == 2) { + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueMinutes(1)); + remoteStorePinnedTimestampService.pinTimestamp(System.currentTimeMillis(), "xyz", noOpActionListener); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + } + } + + ingestDocs(); + + internalCluster().restartNode(primaryNodeName(INDEX_NAME)); + ensureGreen(INDEX_NAME); + + ingestDocs(); + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + + assertBusy(() -> { + List dataFiles = Files.list(translogDataPath).collect(Collectors.toList()); + assertFalse(dataFiles.isEmpty()); + }); + } + + private void ingestDocs() { + int numDocs = randomIntBetween(15, 20); + for (int i = 0; i < numDocs; i++) { + indexSingleDoc(INDEX_NAME, false); + } + + assertNoFailures(client().admin().indices().prepareRefresh(INDEX_NAME).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get()); + flushAndRefresh(INDEX_NAME); + + int numDocsPostFailover = randomIntBetween(15, 20); + for (int i = 0; i < numDocsPostFailover; i++) { + indexSingleDoc(INDEX_NAME, false); + } + + flushAndRefresh(INDEX_NAME); + assertNoFailures(client().admin().indices().prepareRefresh(INDEX_NAME).setIndicesOptions(IndicesOptions.lenientExpandOpen()).get()); + } + + public void testIndexDeletionNoPinnedTimestamps() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 0) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = randomIntBetween(5, 10); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(2, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }); + + 
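+ // Delete the index; with no pinned timestamps, all translog metadata and data files should eventually be cleaned up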
keepPinnedTimestampSchedulerUpdated(); + client().admin().indices().prepareDelete(INDEX_NAME).get(); + + assertBusy(() -> { + assertEquals(0, Files.list(translogMetadataPath).collect(Collectors.toList()).size()); + assertEquals(0, Files.list(translogDataPath).collect(Collectors.toList()).size()); + }); + } + + public void testIndexDeletionWithPinnedTimestamps() throws Exception { + prepareCluster(1, 1, Settings.EMPTY); + Settings indexSettings = Settings.builder() + .put(remoteStoreIndexSettings(0, 1)) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 0) + .build(); + createIndex(INDEX_NAME, indexSettings); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + primaryNodeName(INDEX_NAME) + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + int numDocs = randomIntBetween(5, 10); + for (int i = 0; i < numDocs; i++) { + keepPinnedTimestampSchedulerUpdated(); + indexSingleDoc(INDEX_NAME, true); + if (i == 2) { + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.timeValueMinutes(1)); + remoteStorePinnedTimestampService.pinTimestamp(System.currentTimeMillis(), "xyz", noOpActionListener); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + } + } + + String translogPathFixedPrefix = RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.get(getNodeSettings()); + String shardDataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + DATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogDataPath = Path.of(translogRepoPath + "/" + shardDataPath + "/1"); + String shardMetadataPath = getShardLevelBlobPath( + client(), + INDEX_NAME, + BlobPath.cleanPath(), + "0", + TRANSLOG, + METADATA, + translogPathFixedPrefix + ).buildAsString(); + Path translogMetadataPath = Path.of(translogRepoPath + "/" + shardMetadataPath); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(3, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }, 30, TimeUnit.SECONDS); + + keepPinnedTimestampSchedulerUpdated(); + client().admin().indices().prepareDelete(INDEX_NAME).get(); + + assertBusy(() -> { + List metadataFiles = Files.list(translogMetadataPath).collect(Collectors.toList()); + assertEquals(1, metadataFiles.size()); + + verifyTranslogDataFileCount(metadataFiles, translogDataPath); + }); + } + + private void verifyTranslogDataFileCount(List metadataFiles, Path translogDataPath) throws IOException { + List mdFiles = metadataFiles.stream().map(p -> p.getFileName().toString()).collect(Collectors.toList()); + Set generations = new HashSet<>(); + for (String mdFile : mdFiles) { + Tuple minMaxGen = TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(mdFile); + generations.addAll(LongStream.rangeClosed(minMaxGen.v1(), minMaxGen.v2()).boxed().collect(Collectors.toList())); + } + assertEquals(generations.size() * 2, Files.list(translogDataPath).collect(Collectors.toList()).size()); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java index 2fcda8c2d2f27..024e0e952eea5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStorePinnedTimestampsIT.java @@ -9,6 +9,8 @@ package org.opensearch.remotestore; import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.admin.cluster.node.stats.NodeStats; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -20,6 +22,8 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.REMOTE_STORE; + @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStorePinnedTimestampsIT extends RemoteStoreBaseIntegTestCase { static final String INDEX_NAME = "remote-store-test-idx-1"; @@ -180,4 +184,41 @@ public void onFailure(Exception e) { assertBusy(() -> assertEquals(Set.of(timestamp2, timestamp3), RemoteStorePinnedTimestampService.getPinnedTimestamps().v2())); remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); } + + public void testLastSuccessfulFetchOfPinnedTimestampsPresentInNodeStats() throws Exception { + logger.info("Starting up cluster manager"); + logger.info("cluster.remote_store.pinned_timestamps.enabled set to true"); + logger.info("cluster.remote_store.pinned_timestamps.scheduler_interval set to minimum value of 1minute"); + Settings pinnedTimestampEnabledSettings = Settings.builder() + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_SCHEDULER_INTERVAL.getKey(), "1m") + .build(); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampEnabledSettings); + String remoteNodeName = internalCluster().startDataOnlyNodes(1, pinnedTimestampEnabledSettings).get(0); + ensureStableCluster(2); + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + remoteNodeName + ); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + assertBusy(() -> { + long lastSuccessfulFetchOfPinnedTimestamps = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1(); + assertTrue(lastSuccessfulFetchOfPinnedTimestamps > 0L); + NodesStatsResponse nodesStatsResponse = internalCluster().client() + .admin() + .cluster() + .prepareNodesStats() + .addMetric(REMOTE_STORE.metricName()) + .execute() + .actionGet(); + for (NodeStats nodeStats : nodesStatsResponse.getNodes()) { + long lastRecordedFetch = nodeStats.getRemoteStoreNodeStats().getLastSuccessfulFetchOfPinnedTimestamps(); + assertTrue(lastRecordedFetch >= lastSuccessfulFetchOfPinnedTimestamps); + } + }); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueMinutes(3)); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java index c6744ae62db60..69e85b13548e0 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotV2IT.java @@ -34,11 +34,15 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; @@ -50,8 +54,10 @@ import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -134,27 +140,32 @@ public void testCloneShallowCopyV2() throws Exception { assertTrue(response.isAcknowledged()); awaitClusterManagerFinishRepoOperations(); + AtomicReference cloneSnapshotId = new AtomicReference<>(); // Validate that snapshot is present in repository data - PlainActionFuture repositoryDataPlainActionFutureClone = new PlainActionFuture<>(); - repository.getRepositoryData(repositoryDataPlainActionFutureClone); - - repositoryData = repositoryDataPlainActionFutureClone.get(); - assertEquals(repositoryData.getSnapshotIds().size(), 2); - boolean foundCloneInRepoData = false; - SnapshotId cloneSnapshotId = null; - for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - if (snapshotId.getName().equals("test_clone_snapshot1")) { - foundCloneInRepoData = true; - cloneSnapshotId = snapshotId; + waitUntil(() -> { + PlainActionFuture repositoryDataPlainActionFutureClone = new PlainActionFuture<>(); + repository.getRepositoryData(repositoryDataPlainActionFutureClone); + + RepositoryData repositoryData1; + try { + repositoryData1 = repositoryDataPlainActionFutureClone.get(); + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException(e); } - } - final SnapshotId cloneSnapshotIdFinal = cloneSnapshotId; + for (SnapshotId snapshotId : repositoryData1.getSnapshotIds()) { + if (snapshotId.getName().equals("test_clone_snapshot1")) { + cloneSnapshotId.set(snapshotId); + return true; + } + } + return false; + }, 90, TimeUnit.SECONDS); + + final SnapshotId cloneSnapshotIdFinal = cloneSnapshotId.get(); SnapshotInfo cloneSnapshotInfo = PlainActionFuture.get( f -> repository.threadPool().generic().execute(ActionRunnable.supply(f, () -> repository.getSnapshotInfo(cloneSnapshotIdFinal))) ); - assertTrue(foundCloneInRepoData); - assertThat(cloneSnapshotInfo.getPinnedTimestamp(), equalTo(sourceSnapshotInfo.getPinnedTimestamp())); for (String index : sourceSnapshotInfo.indices()) { 
assertTrue(cloneSnapshotInfo.indices().contains(index)); @@ -259,19 +270,21 @@ public void testCloneShallowCopyAfterDisablingV2() throws Exception { assertThat(sourceSnapshotInfoV1.state(), equalTo(SnapshotState.SUCCESS)); assertThat(sourceSnapshotInfoV1.successfulShards(), greaterThan(0)); assertThat(sourceSnapshotInfoV1.successfulShards(), equalTo(sourceSnapshotInfoV1.totalShards())); - assertThat(sourceSnapshotInfoV1.getPinnedTimestamp(), equalTo(0L)); + // assertThat(sourceSnapshotInfoV1.getPinnedTimestamp(), equalTo(0L)); + AtomicReference repositoryDataAtomicReference = new AtomicReference<>(); + awaitClusterManagerFinishRepoOperations(); // Validate that snapshot is present in repository data - PlainActionFuture repositoryDataV1PlainActionFuture = new PlainActionFuture<>(); - BlobStoreRepository repositoryV1 = (BlobStoreRepository) internalCluster().getCurrentClusterManagerNodeInstance( - RepositoriesService.class - ).repository(snapshotRepoName); - repositoryV1.getRepositoryData(repositoryDataV1PlainActionFuture); - - repositoryData = repositoryDataV1PlainActionFuture.get(); + assertBusy(() -> { + Metadata metadata = clusterAdmin().prepareState().get().getState().metadata(); + RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + assertEquals(1, repositoriesMetadata.repository(snapshotRepoName).generation()); + assertEquals(1, repositoriesMetadata.repository(snapshotRepoName).pendingGeneration()); - assertTrue(repositoryData.getSnapshotIds().contains(sourceSnapshotInfoV1.snapshotId())); - assertEquals(repositoryData.getSnapshotIds().size(), 2); + GetSnapshotsRequest request = new GetSnapshotsRequest(snapshotRepoName); + GetSnapshotsResponse response = client().admin().cluster().getSnapshots(request).actionGet(); + assertEquals(2, response.getSnapshots().size()); + }, 30, TimeUnit.SECONDS); // clone should get created for v2 snapshot AcknowledgedResponse response = client().admin() @@ -289,31 +302,28 @@ public void testCloneShallowCopyAfterDisablingV2() throws Exception { ).repository(snapshotRepoName); repositoryCloneV2.getRepositoryData(repositoryDataCloneV2PlainActionFuture); - repositoryData = repositoryDataCloneV2PlainActionFuture.get(); - - assertEquals(repositoryData.getSnapshotIds().size(), 3); - boolean foundCloneInRepoData = false; - SnapshotId cloneSnapshotId = null; - for (SnapshotId snapshotId : repositoryData.getSnapshotIds()) { - if (snapshotId.getName().equals(cloneSnapshotV2)) { - foundCloneInRepoData = true; - cloneSnapshotId = snapshotId; - } - } - final SnapshotId cloneSnapshotIdFinal = cloneSnapshotId; - SnapshotInfo cloneSnapshotInfo = PlainActionFuture.get( - f -> repository.threadPool().generic().execute(ActionRunnable.supply(f, () -> repository.getSnapshotInfo(cloneSnapshotIdFinal))) - ); + // Validate that snapshot is present in repository data + assertBusy(() -> { + Metadata metadata = clusterAdmin().prepareState().get().getState().metadata(); + RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + assertEquals(2, repositoriesMetadata.repository(snapshotRepoName).generation()); + assertEquals(2, repositoriesMetadata.repository(snapshotRepoName).pendingGeneration()); + GetSnapshotsRequest request = new GetSnapshotsRequest(snapshotRepoName); + GetSnapshotsResponse response2 = client().admin().cluster().getSnapshots(request).actionGet(); + assertEquals(3, response2.getSnapshots().size()); + }, 30, TimeUnit.SECONDS); - assertTrue(foundCloneInRepoData); // pinned timestamp value in clone 
snapshot v2 matches source snapshot v2 - assertThat(cloneSnapshotInfo.getPinnedTimestamp(), equalTo(sourceSnapshotInfo.getPinnedTimestamp())); - for (String index : sourceSnapshotInfo.indices()) { - assertTrue(cloneSnapshotInfo.indices().contains(index)); - + GetSnapshotsRequest request = new GetSnapshotsRequest(snapshotRepoName, new String[] { sourceSnapshotV2, cloneSnapshotV2 }); + GetSnapshotsResponse response2 = client().admin().cluster().getSnapshots(request).actionGet(); + + SnapshotInfo sourceInfo = response2.getSnapshots().get(0); + SnapshotInfo cloneInfo = response2.getSnapshots().get(1); + assertEquals(sourceInfo.getPinnedTimestamp(), cloneInfo.getPinnedTimestamp()); + assertEquals(sourceInfo.totalShards(), cloneInfo.totalShards()); + for (String index : sourceInfo.indices()) { + assertTrue(cloneInfo.indices().contains(index)); } - assertThat(cloneSnapshotInfo.totalShards(), equalTo(sourceSnapshotInfo.totalShards())); - } public void testRestoreFromClone() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index 15e92f6f7204b..26f8ab5dd8d09 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -50,6 +50,7 @@ import org.opensearch.common.util.concurrent.UncategorizedExecutionException; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.Strings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.discovery.AbstractDisruptionTestCase; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoryData; @@ -135,6 +136,60 @@ public void testLongRunningSnapshotAllowsConcurrentSnapshot() throws Exception { assertSuccessful(createSlowFuture); } + public void testSettingsUpdateFailWhenCreateSnapshotInProgress() throws Exception { + // Start a cluster with a cluster manager node and a data node + internalCluster().startClusterManagerOnlyNode(); + final String dataNode = internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + // Create a repository with random settings + Settings.Builder settings = randomRepositorySettings(); + createRepository(repoName, "mock", settings); + createIndexWithContent("index"); + // Start a full snapshot and block it on the data node + final ActionFuture createSlowFuture = startFullSnapshotBlockedOnDataNode( + "slow-snapshot", + repoName, + dataNode + ); + Thread.sleep(1000); // Wait for the snapshot to start + assertFalse(createSlowFuture.isDone()); // Ensure the snapshot is still in progress + // Attempt to update the repository settings while the snapshot is in progress + IllegalStateException ex = assertThrows(IllegalStateException.class, () -> updateRepository(repoName, "mock", settings)); + // Verify that the update fails with an appropriate exception + assertEquals("trying to modify or unregister repository that is currently used", ex.getMessage()); + unblockNode(repoName, dataNode); // Unblock the snapshot + assertSuccessful(createSlowFuture); // Ensure the snapshot completes successfully + } + + public void testSettingsUpdateFailWhenDeleteSnapshotInProgress() throws InterruptedException { + // Start a cluster with a cluster manager node and a data node + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); + 
internalCluster().startDataOnlyNode(); + final String repoName = "test-repo"; + // Create a repository with random settings + Settings.Builder settings = randomRepositorySettings(); + createRepository(repoName, "mock", settings); + createIndexWithContent("index"); + final String snapshotName = "snapshot-1"; + // Create a full snapshot + SnapshotInfo snapshotInfo = createFullSnapshot(repoName, snapshotName); + assertEquals(SnapshotState.SUCCESS, snapshotInfo.state()); // Ensure the snapshot was successful + assertEquals(RestStatus.OK, snapshotInfo.status()); // Ensure the snapshot status is OK + // Start deleting the snapshot and block it on the cluster manager node + ActionFuture future = deleteSnapshotBlockedOnClusterManager(repoName, snapshotName); + Thread.sleep(1000); // Wait for the delete operation to start + assertFalse(future.isDone()); // Ensure the delete operation is still in progress + // Attempt to update the repository settings while the delete operation is in progress + IllegalStateException ex = assertThrows( + IllegalStateException.class, + () -> updateRepository(repoName, "mock", randomRepositorySettings()) + ); + // Verify that the update fails with an appropriate exception + assertEquals("trying to modify or unregister repository that is currently used", ex.getMessage()); + unblockNode(repoName, clusterManagerName); // Unblock the delete operation + assertAcked(future.actionGet()); // Wait for the delete operation to complete + } + public void testDeletesAreBatched() throws Exception { internalCluster().startClusterManagerOnlyNode(); final String dataNode = internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java new file mode 100644 index 0000000000000..f20fddb6af26c --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsV2IT.java @@ -0,0 +1,486 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.snapshots; + +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.Client; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.remotestore.RemoteSnapshotIT; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class ConcurrentSnapshotsV2IT extends RemoteSnapshotIT { + + public void testLongRunningSnapshotDontAllowConcurrentSnapshot() throws Exception { + final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + blockClusterManagerOnWriteIndexFile(repoName); + + final ActionFuture snapshotFuture = startFullSnapshot(repoName, "snapshot-queued"); + awaitNumberOfSnapshotsInProgress(1); + + try { + String snapshotName = "snapshot-concurrent"; + client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName).setWaitForCompletion(true).get(); + fail(); + } catch (Exception e) {} + + unblockNode(repoName, clusterManagerName); + CreateSnapshotResponse csr = snapshotFuture.actionGet(); + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(1, snapInfo.size()); + 
assertThat(snapInfo, contains(csr.getSnapshotInfo())); + } + + public void testCreateSnapshotFailInFinalize() throws Exception { + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); + final ActionFuture snapshotFuture = startFullSnapshot(repoName, "snapshot-queued"); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + unblockNode(repoName, clusterManagerNode); + expectThrows(SnapshotException.class, snapshotFuture::actionGet); + + final ActionFuture snapshotFuture2 = startFullSnapshot(repoName, "snapshot-success"); + // Second create works out cleanly since the repo + CreateSnapshotResponse csr = snapshotFuture2.actionGet(); + + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(1, snapInfo.size()); + assertThat(snapInfo, contains(csr.getSnapshotInfo())); + } + + public void testCreateSnapshotV2MasterSwitch() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, 
indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); + final ActionFuture snapshotFuture = startFullSnapshot(repoName, "snapshot-queued"); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + + // Fail the cluster manager + stopNode(clusterManagerNode); + + ensureGreen(); + + final ActionFuture snapshotFuture2 = startFullSnapshot(repoName, "snapshot-success"); + // Second create works out cleanly since the repo + CreateSnapshotResponse csr = snapshotFuture2.actionGet(); + + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(1, snapInfo.size()); + assertThat(snapInfo, contains(csr.getSnapshotInfo())); + + } + + public void testPinnedTimestampFailSnapshot() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + // fail segment repo - this is to fail the timestamp pinning + setFailRate(BASE_REMOTE_REPO, 100); + + try { + String snapshotName = "snapshot-fail"; + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + fail(); + } catch (Exception e) {} + + setFailRate(BASE_REMOTE_REPO, 0); + String snapshotName = "snapshot-success"; + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(1, snapInfo.size()); + } + + public void testConcurrentSnapshotV2CreateOperation() throws InterruptedException, ExecutionException { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + 
internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + int concurrentSnapshots = 5; + + // Prepare threads for concurrent snapshot creation + List threads = new ArrayList<>(); + + for (int i = 0; i < concurrentSnapshots; i++) { + int snapshotIndex = i; + Thread thread = new Thread(() -> { + try { + String snapshotName = "snapshot-concurrent-" + snapshotIndex; + CreateSnapshotResponse createSnapshotResponse2 = client().admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName) + .setWaitForCompletion(true) + .get(); + SnapshotInfo snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName)); + assertThat(snapshotInfo.getPinnedTimestamp(), greaterThan(0L)); + } catch (Exception e) {} + }); + threads.add(thread); + } + // start all threads + for (Thread thread : threads) { + thread.start(); + } + + // Wait for all threads to complete + for (Thread thread : threads) { + thread.join(); + } + + // Validate that only one snapshot has been created + Repository repository = internalCluster().getInstance(RepositoriesService.class).repository(snapshotRepoName); + PlainActionFuture repositoryDataPlainActionFuture = new PlainActionFuture<>(); + repository.getRepositoryData(repositoryDataPlainActionFuture); + + RepositoryData repositoryData = repositoryDataPlainActionFuture.get(); + assertThat(repositoryData.getSnapshotIds().size(), greaterThanOrEqualTo(1)); + } + + public void testLongRunningSnapshotDontAllowConcurrentClone() throws Exception { + final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + 
.put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + String sourceSnap = "snapshot-source"; + + final CreateSnapshotResponse csr = startFullSnapshot(repoName, sourceSnap).actionGet(); + blockClusterManagerOnWriteIndexFile(repoName); + + final ActionFuture snapshotFuture = startCloneSnapshot(repoName, sourceSnap, "snapshot-clone"); + awaitNumberOfSnapshotsInProgress(1); + + final ActionFuture snapshotFuture2 = startCloneSnapshot(repoName, sourceSnap, "snapshot-clone-2"); + assertThrows(ConcurrentSnapshotExecutionException.class, snapshotFuture2::actionGet); + + unblockNode(repoName, clusterManagerName); + assertThrows(SnapshotException.class, snapshotFuture2::actionGet); + + snapshotFuture.get(); + + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(2, snapInfo.size()); + } + + public void testCloneSnapshotFailInFinalize() throws Exception { + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + String sourceSnap = "snapshot-source"; + CreateSnapshotResponse sourceResp = startFullSnapshot(repoName, sourceSnap).actionGet(); + + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); + final ActionFuture snapshotFuture = startCloneSnapshot(repoName, sourceSnap, "snapshot-queued"); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + 
unblockNode(repoName, clusterManagerNode); + assertThrows(SnapshotException.class, snapshotFuture::actionGet); + + final ActionFuture snapshotFuture2 = startFullSnapshot(repoName, "snapshot-success"); + // Second create works out cleanly since the repo is cleaned up + CreateSnapshotResponse csr = snapshotFuture2.actionGet(); + + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(2, snapInfo.size()); + assertThat(snapInfo, containsInAnyOrder(csr.getSnapshotInfo(), sourceResp.getSnapshotInfo())); + } + + public void testCloneSnapshotV2MasterSwitch() throws Exception { + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + internalCluster().startClusterManagerOnlyNode(pinnedTimestampSettings()); + + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + internalCluster().startDataOnlyNode(pinnedTimestampSettings()); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String repoName = "test-create-snapshot-repo"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(repoName, "mock", settings); + + Client client = client(); + Settings indexSettings = getIndexSettings(20, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(15, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 10; + final int numDocsInIndex2 = 20; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + String sourceSnap = "snapshot-source"; + CreateSnapshotResponse csr = startFullSnapshot(repoName, sourceSnap).actionGet(); + + String clusterManagerNode = internalCluster().getClusterManagerName(); + + blockClusterManagerFromFinalizingSnapshotOnIndexFile(repoName); + final ActionFuture snapshotFuture = startCloneSnapshot(repoName, sourceSnap, "snapshot-queued"); + awaitNumberOfSnapshotsInProgress(1); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + + // Fail the cluster manager + stopNode(clusterManagerNode); + + ensureGreen(); + + final ActionFuture snapshotFuture2 = startFullSnapshot(repoName, "snapshot-success"); + // Second create works out cleanly since the repo + CreateSnapshotResponse csr2 = snapshotFuture2.actionGet(); + List snapInfo = client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(); + assertEquals(2, snapInfo.size()); + assertThat(snapInfo, containsInAnyOrder(csr.getSnapshotInfo(), csr2.getSnapshotInfo())); + } + + protected ActionFuture startCloneSnapshot(String repoName, String sourceSnapshotName, String snapshotName) { + logger.info("--> creating full snapshot [{}] to repo [{}]", snapshotName, repoName); + return clusterAdmin().prepareCloneSnapshot(repoName, sourceSnapshotName, snapshotName).setIndices("*").execute(); + } +} diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java index 1d7a58384c0be..7b2ad2bccd2b1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotV2IT.java @@ -23,19 +23,32 @@ import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchIntegTestCase; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; +import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThan; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class DeleteSnapshotV2IT extends AbstractSnapshotIntegTestCase { private static final String REMOTE_REPO_NAME = "remote-store-repo-name"; + private void keepPinnedTimestampSchedulerUpdated() throws InterruptedException { + long currentTime = System.currentTimeMillis(); + int maxRetry = 10; + while (maxRetry > 0 && RemoteStorePinnedTimestampService.getPinnedTimestamps().v1() <= currentTime) { + Thread.sleep(1000); + maxRetry--; + } + } + public void testDeleteShallowCopyV2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); @@ -74,8 +87,8 @@ public void testDeleteShallowCopyV2() throws Exception { createIndex(indexName1, getRemoteStoreBackedIndexSettings()); createIndex(indexName2, getRemoteStoreBackedIndexSettings()); - final int numDocsInIndex1 = 10; - final int numDocsInIndex2 = 20; + final int numDocsInIndex1 = 1; + final int numDocsInIndex2 = 2; indexRandomDocs(indexName1, numDocsInIndex1); indexRandomDocs(indexName2, numDocsInIndex2); ensureGreen(indexName1, indexName2); @@ -92,7 +105,7 @@ public void testDeleteShallowCopyV2() throws Exception { assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName1)); createIndex(indexName3, getRemoteStoreBackedIndexSettings()); - indexRandomDocs(indexName3, 10); + indexRandomDocs(indexName3, 1); CreateSnapshotResponse createSnapshotResponse2 = client().admin() .cluster() .prepareCreateSnapshot(snapshotRepoName, snapshotName2) @@ -105,109 +118,101 @@ public void testDeleteShallowCopyV2() throws Exception { assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName2)); assertAcked(client().admin().indices().prepareDelete(indexName1)); - Thread.sleep(100); - AcknowledgedResponse deleteResponse = client().admin() - .cluster() - .prepareDeleteSnapshot(snapshotRepoName, snapshotName2) - .setSnapshots(snapshotName2) - .get(); + AcknowledgedResponse deleteResponse = client().admin().cluster().prepareDeleteSnapshot(snapshotRepoName, snapshotName2).get(); assertTrue(deleteResponse.isAcknowledged()); // test delete non-existent snapshot assertThrows( SnapshotMissingException.class, - () -> client().admin().cluster().prepareDeleteSnapshot(snapshotRepoName, "random-snapshot").setSnapshots(snapshotName2).get() + () -> client().admin().cluster().prepareDeleteSnapshot(snapshotRepoName, "random-snapshot").get() ); - } - public void 
testDeleteShallowCopyV2MultipleSnapshots() throws Exception { + public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + settings = Settings.builder() + .put(settings) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.FIXED.toString()) + .build(); + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNode(settings); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); + ensureStableCluster(2); - internalCluster().startClusterManagerOnlyNode(snapshotV2Settings(remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath)); - internalCluster().startDataOnlyNode(snapshotV2Settings(remoteStoreRepoPath)); - - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String indexName3 = "testindex3"; - String snapshotRepoName = "test-create-snapshot-repo"; - String snapshotName1 = "test-create-snapshot1"; - String snapshotName2 = "test-create-snapshot2"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - logger.info("Snapshot Path [{}]", absolutePath1); - - Client client = client(); - - assertAcked( - client.admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + clusterManagerName ); - createIndex(indexName1, getRemoteStoreBackedIndexSettings()); + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowV2(snapshotRepoPath)); - createIndex(indexName2, getRemoteStoreBackedIndexSettings()); + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, 25); - final int numDocsInIndex1 = 10; - final int numDocsInIndex2 = 20; - indexRandomDocs(indexName1, numDocsInIndex1); - indexRandomDocs(indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreEnabledIndexName) + .get() + .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); + logger.info("--> create two remote index shallow snapshots"); CreateSnapshotResponse createSnapshotResponse = client().admin() .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .prepareCreateSnapshot(snapshotRepoName, "snap1") .setWaitForCompletion(true) .get(); - SnapshotInfo snapshotInfo = 
createSnapshotResponse.getSnapshotInfo(); - assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.successfulShards(), greaterThan(0)); - assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); - assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName1)); + SnapshotInfo snapshotInfo1 = createSnapshotResponse.getSnapshotInfo(); - createIndex(indexName3, getRemoteStoreBackedIndexSettings()); - indexRandomDocs(indexName3, 10); + Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID); + Path shardPath = Path.of(String.valueOf(indexPath), "0"); - CreateSnapshotResponse createSnapshotResponse2 = client().admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .get(); - snapshotInfo = createSnapshotResponse2.getSnapshotInfo(); - assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); - assertThat(snapshotInfo.successfulShards(), greaterThan(0)); - assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); - assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName2)); + // delete remote store index + assertAcked(client().admin().indices().prepareDelete(remoteStoreEnabledIndexName)); + + logger.info("--> delete snapshot 1"); + + Path segmentsPath = Path.of(String.valueOf(shardPath), "segments"); + Path translogPath = Path.of(String.valueOf(shardPath), "translog"); - AcknowledgedResponse deleteResponse = client().admin() + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + + keepPinnedTimestampSchedulerUpdated(); + + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() .cluster() - .prepareDeleteSnapshot(snapshotRepoName, snapshotName1, snapshotName2) - .setSnapshots(snapshotName2) + .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo1.snapshotId().getName()) .get(); - assertTrue(deleteResponse.isAcknowledged()); + assertAcked(deleteSnapshotResponse); - // test delete non-existent snapshot - assertThrows( - SnapshotMissingException.class, - () -> client().admin().cluster().prepareDeleteSnapshot(snapshotRepoName, "random-snapshot").setSnapshots(snapshotName2).get() - ); + // Delete is async. 
Give time for it + assertBusy(() -> { + try { + assertEquals(0, RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath)); + } catch (NoSuchFileException e) { + fail(); + } + }, 60, TimeUnit.SECONDS); + assertBusy(() -> { + try { + assertEquals(0, RemoteStoreBaseIntegTestCase.getFileCount(translogPath)); + } catch (NoSuchFileException e) { + fail(); + } + }, 60, TimeUnit.SECONDS); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/15692") - public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exception { + public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2MultipleSnapshots() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); final Path remoteStoreRepoPath = randomRepoPath(); Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); @@ -242,11 +247,11 @@ public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exceptio .get() .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); - String numShards = client().admin() - .indices() - .prepareGetSettings(remoteStoreEnabledIndexName) - .get() - .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_NUMBER_OF_SHARDS); + Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID); + Path shardPath = Path.of(String.valueOf(indexPath), "0"); + + Path segmentsPath = Path.of(String.valueOf(shardPath), "segments", "data"); + Path translogPath = Path.of(String.valueOf(shardPath), "translog", "data", "1"); logger.info("--> create two remote index shallow snapshots"); CreateSnapshotResponse createSnapshotResponse = client().admin() @@ -256,6 +261,11 @@ public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exceptio .get(); SnapshotInfo snapshotInfo1 = createSnapshotResponse.getSnapshotInfo(); + List segmentsPostSnapshot1 = Files.list(segmentsPath).collect(Collectors.toList()); + List translogPostSnapshot1 = Files.list(translogPath).collect(Collectors.toList()); + + forceMerge(1); + refresh(remoteStoreEnabledIndexName); indexRandomDocs(remoteStoreEnabledIndexName, 25); CreateSnapshotResponse createSnapshotResponse2 = client().admin() @@ -264,70 +274,151 @@ public void testRemoteStoreCleanupForDeletedIndexForSnapshotV2() throws Exceptio .setWaitForCompletion(true) .get(); SnapshotInfo snapshotInfo2 = createSnapshotResponse2.getSnapshotInfo(); + + List segmentsPostSnapshot2 = Files.list(segmentsPath).collect(Collectors.toList()); + List translogPostSnapshot2 = Files.list(translogPath).collect(Collectors.toList()); + assertThat(snapshotInfo2.state(), equalTo(SnapshotState.SUCCESS)); assertThat(snapshotInfo2.successfulShards(), greaterThan(0)); assertThat(snapshotInfo2.successfulShards(), equalTo(snapshotInfo2.totalShards())); assertThat(snapshotInfo2.snapshotId().getName(), equalTo("snap2")); + assertBusy(() -> assertTrue(translogPostSnapshot2.size() > translogPostSnapshot1.size()), 60, TimeUnit.SECONDS); + assertBusy(() -> assertTrue(segmentsPostSnapshot2.size() > segmentsPostSnapshot1.size()), 60, TimeUnit.SECONDS); + + keepPinnedTimestampSchedulerUpdated(); + // delete remote store index assertAcked(client().admin().indices().prepareDelete(remoteStoreEnabledIndexName)); - logger.info("--> delete snapshot 2"); + logger.info("--> delete snapshot 1"); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + keepPinnedTimestampSchedulerUpdated(); + // on snapshot deletion, remote store segment files should get cleaned up for 
deleted index - `remote-index-1` + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo1.snapshotId().getName()) + .get(); + assertAcked(deleteSnapshotResponse); + + // Delete is async. Give time for it + assertBusy(() -> { + List segmentsPostDeletionOfSnapshot1 = Files.list(segmentsPath).collect(Collectors.toList()); + assertTrue(segmentsPostDeletionOfSnapshot1.size() < segmentsPostSnapshot2.size()); + }, 60, TimeUnit.SECONDS); + // To uncomment following, we need to handle deletion of generations in translog cleanup flow + // List translogPostDeletionOfSnapshot1 = Files.list(translogPath).collect(Collectors.toList()); + // Delete is async. Give time for it + // assertBusy(() -> assertEquals(translogPostSnapshot2.size() - translogPostSnapshot1.size(), + // translogPostDeletionOfSnapshot1.size()), 60, TimeUnit.SECONDS); + } + + public void testRemoteStoreCleanupMultiplePrimaryOnSnapshotDeletion() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used in the test"); + final Path remoteStoreRepoPath = randomRepoPath(); + Settings settings = remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath); + settings = Settings.builder() + .put(settings) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PATH_TYPE_SETTING.getKey(), RemoteStoreEnums.PathType.FIXED.toString()) + .build(); + String clusterManagerName = internalCluster().startClusterManagerOnlyNode(settings); + internalCluster().startDataOnlyNodes(3, settings); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); + ensureStableCluster(4); + + RemoteStorePinnedTimestampService remoteStorePinnedTimestampService = internalCluster().getInstance( + RemoteStorePinnedTimestampService.class, + clusterManagerName + ); + remoteStorePinnedTimestampService.rescheduleAsyncUpdatePinnedTimestampTask(TimeValue.timeValueSeconds(1)); + RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowV2(snapshotRepoPath)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = Settings.builder() + .put(getRemoteStoreBackedIndexSettings()) + .put(INDEX_REMOTE_TRANSLOG_KEEP_EXTRA_GEN_SETTING.getKey(), 2) + .build(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(remoteStoreEnabledIndexName); + + // Create 2 snapshots for primary term 1 + keepPinnedTimestampSchedulerUpdated(); + indexRandomDocs(remoteStoreEnabledIndexName, 5); + createSnapshot(snapshotRepoName, "snap1"); + keepPinnedTimestampSchedulerUpdated(); + indexRandomDocs(remoteStoreEnabledIndexName, 5); + createSnapshot(snapshotRepoName, "snap2"); + + // Restart current primary to change the primary term + internalCluster().restartNode(primaryNodeName(remoteStoreEnabledIndexName)); + ensureGreen(remoteStoreEnabledIndexName); + + // Create 2 snapshots for primary term 2 + keepPinnedTimestampSchedulerUpdated(); + indexRandomDocs(remoteStoreEnabledIndexName, 5); + createSnapshot(snapshotRepoName, "snap3"); + keepPinnedTimestampSchedulerUpdated(); + indexRandomDocs(remoteStoreEnabledIndexName, 5); + createSnapshot(snapshotRepoName, "snap4"); + + String indexUUID = 
client().admin() + .indices() + .prepareGetSettings(remoteStoreEnabledIndexName) + .get() + .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); Path indexPath = Path.of(String.valueOf(remoteStoreRepoPath), indexUUID); Path shardPath = Path.of(String.valueOf(indexPath), "0"); - Path segmentsPath = Path.of(String.valueOf(shardPath), "segments"); - Path translogPath = Path.of(String.valueOf(shardPath), "translog"); + Path translogPath = Path.of(String.valueOf(shardPath), "translog", "data", "1"); + + // Deleting snap1 will still keep files in primary term 1 due to snap2 + deleteSnapshot(clusterManagerClient, snapshotRepoName, "snap1"); + assertTrue(RemoteStoreBaseIntegTestCase.getFileCount(translogPath) > 0); - // Get total segments remote store directory file count for deleted index and shard 0 - int segmentFilesCountBeforeDeletingSnapshot1 = RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath); - int translogFilesCountBeforeDeletingSnapshot1 = RemoteStoreBaseIntegTestCase.getFileCount(translogPath); + // Deleting snap2 will not remove primary term 1 as we need to trigger trimUnreferencedReaders once + deleteSnapshot(clusterManagerClient, snapshotRepoName, "snap2"); + assertTrue(RemoteStoreBaseIntegTestCase.getFileCount(translogPath) > 0); + // Index a doc to trigger trimUnreferencedReaders RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + keepPinnedTimestampSchedulerUpdated(); + indexRandomDocs(remoteStoreEnabledIndexName, 5); - AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() + assertBusy(() -> assertFalse(Files.exists(translogPath)), 30, TimeUnit.SECONDS); + } + + private void createSnapshot(String repoName, String snapshotName) { + CreateSnapshotResponse createSnapshotResponse = client().admin() .cluster() - .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo2.snapshotId().getName()) + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) .get(); - assertAcked(deleteSnapshotResponse); - - Thread.sleep(5000); + SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); - assertBusy(() -> { - try { - assertThat(RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath), lessThan(segmentFilesCountBeforeDeletingSnapshot1)); - } catch (Exception e) {} - }, 30, TimeUnit.SECONDS); - int segmentFilesCountAfterDeletingSnapshot1 = RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath); + assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS)); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + assertThat(snapshotInfo.snapshotId().getName(), equalTo(snapshotName)); + } - logger.info("--> delete snapshot 1"); - RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); - // on snapshot deletion, remote store segment files should get cleaned up for deleted index - `remote-index-1` - deleteSnapshotResponse = clusterManagerClient.admin() + private void deleteSnapshot(Client clusterManagerClient, String repoName, String snapshotName) { + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() .cluster() - .prepareDeleteSnapshot(snapshotRepoName, snapshotInfo1.snapshotId().getName()) + .prepareDeleteSnapshot(repoName, snapshotName) .get(); assertAcked(deleteSnapshotResponse); - - // Delete is async. 
Give time for it - assertBusy(() -> { - try { - assertThat(RemoteStoreBaseIntegTestCase.getFileCount(segmentsPath), lessThan(segmentFilesCountAfterDeletingSnapshot1)); - } catch (Exception e) {} - }, 60, TimeUnit.SECONDS); - - assertBusy(() -> { - try { - assertThat(RemoteStoreBaseIntegTestCase.getFileCount(translogPath), lessThan(translogFilesCountBeforeDeletingSnapshot1)); - } catch (Exception e) {} - }, 60, TimeUnit.SECONDS); - } private Settings snapshotV2Settings(Path remoteStoreRepoPath) { Settings settings = Settings.builder() .put(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)) .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), true) + .put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_METADATA.getKey(), false) .build(); return settings; } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index c3214022df663..8b6869aa1d81a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -116,7 +116,7 @@ public void testStatusApiConsistency() { assertEquals(snapshotStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } - public void testStatusAPICallForShallowCopySnapshot() { + public void testStatusAPICallForShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); @@ -136,15 +136,24 @@ public void testStatusAPICallForShallowCopySnapshot() { final String snapshot = "snapshot"; createFullSnapshot(snapshotRepoName, snapshot); - final SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, snapshot); - assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + assertBusy(() -> { + final SnapshotStatus snapshotStatus = client().admin() + .cluster() + .prepareSnapshotStatus(snapshotRepoName) + .setSnapshots(snapshot) + .execute() + .actionGet() + .getSnapshots() + .get(0); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); - final SnapshotIndexShardStatus snapshotShardState = stateFirstShard(snapshotStatus, indexName); - assertThat(snapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); - assertThat(snapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); - assertThat(snapshotShardState.getStats().getTotalSize(), greaterThan(0L)); - assertThat(snapshotShardState.getStats().getIncrementalFileCount(), greaterThan(0)); - assertThat(snapshotShardState.getStats().getIncrementalSize(), greaterThan(0L)); + final SnapshotIndexShardStatus snapshotShardState = stateFirstShard(snapshotStatus, indexName); + assertThat(snapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); + assertThat(snapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); + assertThat(snapshotShardState.getStats().getTotalSize(), greaterThan(0L)); + assertThat(snapshotShardState.getStats().getIncrementalFileCount(), greaterThan(0)); + assertThat(snapshotShardState.getStats().getIncrementalSize(), greaterThan(0L)); + }, 20, TimeUnit.SECONDS); } public void testStatusAPICallInProgressSnapshot() throws Exception { @@ -193,7 +202,7 @@ public void testExceptionOnMissingSnapBlob() throws IOException { ); } - public void 
testExceptionOnMissingShardLevelSnapBlob() throws IOException { + public void testExceptionOnMissingShardLevelSnapBlob() throws Exception { disableRepoConsistencyCheck("This test intentionally corrupts the repository"); final Path repoPath = randomRepoPath(); @@ -216,11 +225,12 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { repoPath.resolve(resolvePath(indexId, "0")) .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); - - expectThrows( - SnapshotMissingException.class, - () -> client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").execute().actionGet() - ); + assertBusy(() -> { + expectThrows( + SnapshotMissingException.class, + () -> client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").execute().actionGet() + ); + }, 20, TimeUnit.SECONDS); } public void testGetSnapshotsWithoutIndices() throws Exception { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 3fe0f1dc7cb83..937d7509fe68c 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -461,6 +461,9 @@ import org.opensearch.rest.action.ingest.RestGetPipelineAction; import org.opensearch.rest.action.ingest.RestPutPipelineAction; import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; +import org.opensearch.rest.action.list.AbstractListAction; +import org.opensearch.rest.action.list.RestIndicesListAction; +import org.opensearch.rest.action.list.RestListAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; import org.opensearch.rest.action.search.RestCreatePitAction; @@ -802,9 +805,14 @@ private ActionFilters setupActionFilters(List actionPlugins) { public void initRestHandlers(Supplier nodesInCluster) { List catActions = new ArrayList<>(); + List listActions = new ArrayList<>(); Consumer registerHandler = handler -> { if (handler instanceof AbstractCatAction) { - catActions.add((AbstractCatAction) handler); + if (handler instanceof AbstractListAction && ((AbstractListAction) handler).isActionPaginated()) { + listActions.add((AbstractListAction) handler); + } else { + catActions.add((AbstractCatAction) handler); + } } restController.registerHandler(handler); }; @@ -980,6 +988,9 @@ public void initRestHandlers(Supplier nodesInCluster) { } registerHandler.accept(new RestTemplatesAction()); + // LIST API + registerHandler.accept(new RestIndicesListAction()); + // Point in time API registerHandler.accept(new RestCreatePitAction()); registerHandler.accept(new RestDeletePitAction()); @@ -1011,6 +1022,7 @@ public void initRestHandlers(Supplier nodesInCluster) { } } registerHandler.accept(new RestCatAction(catActions)); + registerHandler.accept(new RestListAction(listActions)); registerHandler.accept(new RestDecommissionAction()); registerHandler.accept(new RestGetDecommissionStateAction()); registerHandler.accept(new RestRemoteStoreStatsAction()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 0917a0baff1ab..c91260778f037 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java 
@@ -59,6 +59,7 @@ import org.opensearch.monitor.process.ProcessStats; import org.opensearch.node.AdaptiveSelectionStats; import org.opensearch.node.NodesResourceUsageStats; +import org.opensearch.node.remotestore.RemoteStoreNodeStats; import org.opensearch.ratelimitting.admissioncontrol.stats.AdmissionControlStats; import org.opensearch.repositories.RepositoriesStats; import org.opensearch.script.ScriptCacheStats; @@ -162,6 +163,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private NodeCacheStats nodeCacheStats; + @Nullable + private RemoteStoreNodeStats remoteStoreNodeStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -243,6 +247,12 @@ public NodeStats(StreamInput in) throws IOException { } else { nodeCacheStats = null; } + // TODO: change version to V_2_18_0 + if (in.getVersion().onOrAfter(Version.CURRENT)) { + remoteStoreNodeStats = in.readOptionalWriteable(RemoteStoreNodeStats::new); + } else { + remoteStoreNodeStats = null; + } } public NodeStats( @@ -274,7 +284,8 @@ public NodeStats( @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats, @Nullable RepositoriesStats repositoriesStats, @Nullable AdmissionControlStats admissionControlStats, - @Nullable NodeCacheStats nodeCacheStats + @Nullable NodeCacheStats nodeCacheStats, + @Nullable RemoteStoreNodeStats remoteStoreNodeStats ) { super(node); this.timestamp = timestamp; @@ -305,6 +316,7 @@ public NodeStats( this.repositoriesStats = repositoriesStats; this.admissionControlStats = admissionControlStats; this.nodeCacheStats = nodeCacheStats; + this.remoteStoreNodeStats = remoteStoreNodeStats; } public long getTimestamp() { @@ -467,6 +479,11 @@ public NodeCacheStats getNodeCacheStats() { return nodeCacheStats; } + @Nullable + public RemoteStoreNodeStats getRemoteStoreNodeStats() { + return remoteStoreNodeStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -525,6 +542,10 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_14_0)) { out.writeOptionalWriteable(nodeCacheStats); } + // TODO: change version to V_2_18_0 + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeOptionalWriteable(remoteStoreNodeStats); + } } @Override @@ -631,6 +652,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getNodeCacheStats() != null) { getNodeCacheStats().toXContent(builder, params); } + if (getRemoteStoreNodeStats() != null) { + getRemoteStoreNodeStats().toXContent(builder, params); + } return builder; } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index f1f9f93afdad2..a5b00ed82d3cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -220,7 +220,8 @@ public enum Metric { SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"), REPOSITORIES("repositories"), ADMISSION_CONTROL("admission_control"), - CACHE_STATS("caches"); + CACHE_STATS("caches"), + REMOTE_STORE("remote_store"); private String metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 2c808adc97c7a..a98d245af872b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -129,7 +129,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics), NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics), NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics), - NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics) + NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics), + NodesStatsRequest.Metric.REMOTE_STORE.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a49ca2035783c..c4b3524cf6da5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -174,6 +174,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index a8b97d0f344ae..1a3c657f5b1b8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -37,6 +37,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -88,6 +89,7 @@ public class TransportIndicesShardStoresAction extends TransportClusterManagerNo private static final Logger logger = LogManager.getLogger(TransportIndicesShardStoresAction.class); private final TransportNodesListGatewayStartedShards listShardStoresInfo; + private final ClusterManagerMetrics clusterManagerMetrics; @Inject public TransportIndicesShardStoresAction( @@ -96,7 +98,8 @@ public TransportIndicesShardStoresAction( ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportNodesListGatewayStartedShards listShardStoresInfo + TransportNodesListGatewayStartedShards listShardStoresInfo, + ClusterManagerMetrics clusterManagerMetrics ) { super( IndicesShardStoresAction.NAME, @@ -109,6 +112,7 @@ public TransportIndicesShardStoresAction( true ); this.listShardStoresInfo = listShardStoresInfo; + this.clusterManagerMetrics = clusterManagerMetrics; } @Override @@ -154,7 +158,7 @@ protected void clusterManagerOperation( // we could fetch all shard store info from every node once (nNodes requests) // we have to implement a TransportNodesAction instead of using 
TransportNodesListGatewayStartedShards // for fetching shard stores info, that operates on a list of shards instead of a single shard - new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, shardsToFetch, listener).start(); + new AsyncShardStoresInfoFetches(state.nodes(), routingNodes, shardsToFetch, listener, clusterManagerMetrics).start(); } @Override @@ -175,12 +179,14 @@ private class AsyncShardStoresInfoFetches { private final ActionListener listener; private CountDown expectedOps; private final Queue fetchResponses; + private final ClusterManagerMetrics clusterManagerMetrics; AsyncShardStoresInfoFetches( DiscoveryNodes nodes, RoutingNodes routingNodes, Set> shards, - ActionListener listener + ActionListener listener, + ClusterManagerMetrics clusterManagerMetrics ) { this.nodes = nodes; this.routingNodes = routingNodes; @@ -188,6 +194,7 @@ private class AsyncShardStoresInfoFetches { this.listener = listener; this.fetchResponses = new ConcurrentLinkedQueue<>(); this.expectedOps = new CountDown(shards.size()); + this.clusterManagerMetrics = clusterManagerMetrics; } void start() { @@ -195,7 +202,14 @@ void start() { listener.onResponse(new IndicesShardStoresResponse()); } else { for (Tuple shard : shards) { - InternalAsyncFetch fetch = new InternalAsyncFetch(logger, "shard_stores", shard.v1(), shard.v2(), listShardStoresInfo); + InternalAsyncFetch fetch = new InternalAsyncFetch( + logger, + "shard_stores", + shard.v1(), + shard.v2(), + listShardStoresInfo, + clusterManagerMetrics + ); fetch.fetchData(nodes, Collections.emptyMap()); } } @@ -213,9 +227,10 @@ private class InternalAsyncFetch extends AsyncShardFetch deciderList; final ShardsAllocator shardsAllocator; + private final ClusterManagerMetrics clusterManagerMetrics; public ClusterModule( Settings settings, @@ -166,6 +167,7 @@ public ClusterModule( settings, clusterManagerMetrics ); + this.clusterManagerMetrics = clusterManagerMetrics; } public static List getNamedWriteables() { @@ -456,6 +458,7 @@ protected void configure() { bind(TaskResultsService.class).asEagerSingleton(); bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); + bind(ClusterManagerMetrics.class).toInstance(clusterManagerMetrics); } public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator, ShardsBatchGatewayAllocator shardsBatchGatewayAllocator) { diff --git a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java index 1c12c260b3929..8ce11c8183cf6 100644 --- a/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/opensearch/cluster/NodeConnectionsService.java @@ -103,10 +103,10 @@ public class NodeConnectionsService extends AbstractLifecycleComponent { // contains an entry for every node in the latest cluster state, as well as for nodes from which we are in the process of // disconnecting - private final Map targetsByNode = new HashMap<>(); + protected final Map targetsByNode = new HashMap<>(); private final TimeValue reconnectInterval; - private volatile ConnectionChecker connectionChecker; + protected volatile ConnectionChecker connectionChecker; @Inject public NodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) { @@ -115,6 +115,11 @@ public NodeConnectionsService(Settings settings, ThreadPool threadPool, Transpor this.reconnectInterval = 
NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.get(settings); } + // exposed for testing + protected ConnectionTarget createConnectionTarget(DiscoveryNode discoveryNode) { + return new ConnectionTarget(discoveryNode); + } + /** * Connect to all the given nodes, but do not disconnect from any extra nodes. Calls the completion handler on completion of all * connection attempts to _new_ nodes, but not on attempts to re-establish connections to nodes that are already known. @@ -159,6 +164,14 @@ public void connectToNodes(DiscoveryNodes discoveryNodes, Runnable onCompletion) runnables.forEach(Runnable::run); } + public void setPendingDisconnections(Set nodes) { + nodes.forEach(transportService::setPendingDisconnection); + } + + public void clearPendingDisconnections() { + transportService.clearPendingDisconnections(); + } + /** * Disconnect from any nodes to which we are currently connected which do not appear in the given nodes. Does not wait for the * disconnections to complete, because they might have to wait for ongoing connection attempts first. @@ -211,7 +224,7 @@ private void awaitPendingActivity(Runnable onCompletion) { * nodes which are in the process of disconnecting. The onCompletion handler is called after all ongoing connection/disconnection * attempts have completed. */ - private void connectDisconnectedTargets(Runnable onCompletion) { + protected void connectDisconnectedTargets(Runnable onCompletion) { final List runnables = new ArrayList<>(); synchronized (mutex) { final Collection connectionTargets = targetsByNode.values(); @@ -321,7 +334,7 @@ private enum ActivityType { * * @opensearch.internal */ - private class ConnectionTarget { + protected class ConnectionTarget { private final DiscoveryNode discoveryNode; private PlainListenableActionFuture future = PlainListenableActionFuture.newListenableFuture(); diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index d658f38430dd9..e3f98a3f61534 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -134,6 +134,38 @@ public static Entry startedEntry( ); } + public static Entry startedEntry( + Snapshot snapshot, + boolean includeGlobalState, + boolean partial, + List indices, + List dataStreams, + long startTime, + long repositoryStateId, + final Map shards, + Map userMetadata, + Version version, + boolean remoteStoreIndexShallowCopy, + boolean remoteStoreIndexShallowCopyV2 + ) { + return new SnapshotsInProgress.Entry( + snapshot, + includeGlobalState, + partial, + completed(shards.values()) ? 
State.SUCCESS : State.STARTED, + indices, + dataStreams, + startTime, + repositoryStateId, + shards, + null, + userMetadata, + version, + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 + ); + } + /** * Creates the initial snapshot clone entry * @@ -168,8 +200,39 @@ public static Entry startClone( version, source, Map.of(), - false // initialising to false, will be updated in startCloning method of SnapshotsService while updating entry with - // clone jobs + false, + false// initialising to false, will be updated in startCloning method of SnapshotsService while updating entry with + // clone jobs + ); + } + + public static Entry startClone( + Snapshot snapshot, + SnapshotId source, + List indices, + long startTime, + long repositoryStateId, + Version version, + boolean remoteStoreIndexShallowCopyV2 + ) { + return new SnapshotsInProgress.Entry( + snapshot, + true, + false, + State.STARTED, + indices, + Collections.emptyList(), + startTime, + repositoryStateId, + Map.of(), + null, + Collections.emptyMap(), + version, + source, + Map.of(), + remoteStoreIndexShallowCopyV2, + remoteStoreIndexShallowCopyV2// initialising to false, will be updated in startCloning method of SnapshotsService + // while updating entry with clone jobs ); } @@ -183,6 +246,8 @@ public static class Entry implements Writeable, ToXContent, RepositoryOperation private final Snapshot snapshot; private final boolean includeGlobalState; private final boolean remoteStoreIndexShallowCopy; + + private final boolean remoteStoreIndexShallowCopyV2; private final boolean partial; /** * Map of {@link ShardId} to {@link ShardSnapshotStatus} tracking the state of each shard snapshot operation. @@ -212,6 +277,42 @@ public static class Entry implements Writeable, ToXContent, RepositoryOperation @Nullable private final String failure; + public Entry( + Snapshot snapshot, + boolean includeGlobalState, + boolean partial, + State state, + List indices, + List dataStreams, + long startTime, + long repositoryStateId, + final Map shards, + String failure, + Map userMetadata, + Version version, + boolean remoteStoreIndexShallowCopy, + boolean remoteStoreIndexShallowCopyV2 + ) { + this( + snapshot, + includeGlobalState, + partial, + state, + indices, + dataStreams, + startTime, + repositoryStateId, + shards, + failure, + userMetadata, + version, + null, + Map.of(), + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 + ); + } + // visible for testing, use #startedEntry and copy constructors in production code public Entry( Snapshot snapshot, @@ -243,7 +344,8 @@ public Entry( version, null, Map.of(), - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + false ); } @@ -262,7 +364,8 @@ private Entry( Version version, @Nullable SnapshotId source, @Nullable final Map clones, - boolean remoteStoreIndexShallowCopy + boolean remoteStoreIndexShallowCopy, + boolean remoteStoreIndexShallowCopyV2 ) { this.state = state; this.snapshot = snapshot; @@ -284,7 +387,9 @@ private Entry( this.clones = Collections.unmodifiableMap(clones); } this.remoteStoreIndexShallowCopy = remoteStoreIndexShallowCopy; - assert assertShardsConsistent(this.source, this.state, this.indices, this.shards, this.clones); + this.remoteStoreIndexShallowCopyV2 = remoteStoreIndexShallowCopyV2; + assert this.remoteStoreIndexShallowCopyV2 + || assertShardsConsistent(this.source, this.state, this.indices, this.shards, this.clones); } private Entry(StreamInput in) throws IOException { @@ -307,6 +412,11 @@ private Entry(StreamInput in) throws IOException { } else { 
remoteStoreIndexShallowCopy = false; } + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { + remoteStoreIndexShallowCopyV2 = in.readBoolean(); + } else { + remoteStoreIndexShallowCopyV2 = false; + } } private static boolean assertShardsConsistent( @@ -428,7 +538,8 @@ public Entry withRepoGen(long newRepoGen) { version, source, clones, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 ); } @@ -451,7 +562,8 @@ public Entry withClones(final Map update version, source, updatedClones, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 ); } @@ -471,7 +583,8 @@ public Entry withRemoteStoreIndexShallowCopy(final boolean remoteStoreIndexShall version, source, clones, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 ); } @@ -527,7 +640,8 @@ public Entry fail(final Map shards, State state, S version, source, clones, - remoteStoreIndexShallowCopy + remoteStoreIndexShallowCopy, + remoteStoreIndexShallowCopyV2 ); } @@ -614,6 +728,10 @@ public boolean remoteStoreIndexShallowCopy() { return remoteStoreIndexShallowCopy; } + public boolean remoteStoreIndexShallowCopyV2() { + return remoteStoreIndexShallowCopyV2; + } + public Map userMetadata() { return userMetadata; } @@ -678,6 +796,7 @@ public boolean equals(Object o) { if (Objects.equals(source, ((Entry) o).source) == false) return false; if (clones.equals(((Entry) o).clones) == false) return false; if (remoteStoreIndexShallowCopy != entry.remoteStoreIndexShallowCopy) return false; + if (remoteStoreIndexShallowCopyV2 != entry.remoteStoreIndexShallowCopyV2) return false; return true; } @@ -695,6 +814,7 @@ public int hashCode() { result = 31 * result + (source == null ? 0 : source.hashCode()); result = 31 * result + clones.hashCode(); result = 31 * result + (remoteStoreIndexShallowCopy ? 1 : 0); + result = 31 * result + (remoteStoreIndexShallowCopyV2 ? 
1 : 0); return result; } @@ -766,6 +886,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeBoolean(remoteStoreIndexShallowCopy); } + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { + out.writeBoolean(remoteStoreIndexShallowCopyV2); + } } @Override diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 13a57d93f03f0..9859abe503eaa 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateTaskConfig; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.LocalClusterUpdateTask; +import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.ClusterFormationFailureHelper.ClusterFormationState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; @@ -187,6 +188,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final NodeHealthService nodeHealthService; private final PersistedStateRegistry persistedStateRegistry; private final RemoteStoreNodeService remoteStoreNodeService; + private NodeConnectionsService nodeConnectionsService; /** * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. @@ -418,7 +420,11 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { synchronized (mutex) { final DiscoveryNode sourceNode = publishRequest.getAcceptedState().nodes().getClusterManagerNode(); - logger.trace("handlePublishRequest: handling [{}] from [{}]", publishRequest, sourceNode); + logger.debug( + "handlePublishRequest: handling version [{}] from [{}]", + publishRequest.getAcceptedState().getVersion(), + sourceNode + ); if (sourceNode.equals(getLocalNode()) && mode != Mode.LEADER) { // Rare case in which we stood down as leader between starting this publication and receiving it ourselves. 
The publication @@ -630,7 +636,6 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback transportService.connectToNode(joinRequest.getSourceNode(), ActionListener.wrap(ignore -> { final ClusterState stateForJoinValidation = getStateForClusterManagerService(); - if (stateForJoinValidation.nodes().isLocalNodeElectedClusterManager()) { onJoinValidators.forEach(a -> a.accept(joinRequest.getSourceNode(), stateForJoinValidation)); if (stateForJoinValidation.getBlocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) == false) { @@ -814,6 +819,10 @@ public void onFailure(String source, Exception e) { public ClusterTasksResult execute(ClusterState currentState) { if (currentState.nodes().isLocalNodeElectedClusterManager() == false) { allocationService.cleanCaches(); + // This set only needs to be maintained on the active cluster-manager + // This is cleaned up to avoid stale entries which would block future reconnections + logger.trace("Removing all pending disconnections as part of cluster-manager cleanup"); + nodeConnectionsService.clearPendingDisconnections(); } return unchanged(); } @@ -914,11 +923,18 @@ public DiscoveryStats stats() { @Override public void startInitialJoin() { synchronized (mutex) { + logger.trace("Starting initial join, becoming candidate"); becomeCandidate("startInitialJoin"); } clusterBootstrapService.scheduleUnconfiguredBootstrap(); } + @Override + public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) { + assert this.nodeConnectionsService == null : "nodeConnectionsService is already set"; + this.nodeConnectionsService = nodeConnectionsService; + } + @Override protected void doStop() { configuredHostsResolver.stop(); @@ -1356,6 +1372,9 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) currentPublication = Optional.of(publication); final DiscoveryNodes publishNodes = publishRequest.getAcceptedState().nodes(); + // marking pending disconnects before publish + // if a node tries to send a joinRequest while it is pending disconnect, it should fail + nodeConnectionsService.setPendingDisconnections(new HashSet<>(clusterChangedEvent.nodesDelta().removedNodes())); leaderChecker.setCurrentNodes(publishNodes); followersChecker.setCurrentNodes(publishNodes); lagDetector.setTrackedNodes(publishNodes); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 838b5723b217b..13033b670d44b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -420,9 +420,14 @@ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata * ensures that the joining node has a version that's compatible with all current nodes */ public static void ensureNodesCompatibility(final DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { - final Version minNodeVersion = currentNodes.getMinNodeVersion(); - final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); - ensureNodesCompatibility(joiningNode, currentNodes, metadata, minNodeVersion, maxNodeVersion); + try { + final Version minNodeVersion = currentNodes.getMinNodeVersion(); + final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); + ensureNodesCompatibility(joiningNode, currentNodes, metadata, minNodeVersion, maxNodeVersion); + } catch (Exception e) { + 
logger.error("Exception in NodesCompatibility validation", e); + throw e; + } } /** @@ -539,9 +544,11 @@ private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNod CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); List reposToSkip = new ArrayList<>(1); + // find a remote node which has routing table configured Optional remoteRoutingTableNode = existingNodes.stream() .filter( - node -> node.getAttributes().get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) != null + node -> node.isRemoteStoreNode() + && node.getAttributes().get(RemoteStoreNodeAttribute.REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY) != null ) .findFirst(); // If none of the existing nodes have routing table repo, then we skip this repo check if present in joining node. diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java index 43801a05dbc24..3f7218939be92 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java @@ -85,7 +85,7 @@ public Publication(PublishRequest publishRequest, AckListener ackListener, LongS } public void start(Set faultyNodes) { - logger.trace("publishing {} to {}", publishRequest, publicationTargets); + logger.debug("publishing version {} to {}", publishRequest.getAcceptedState().getVersion(), publicationTargets); for (final DiscoveryNode faultyNode : faultyNodes) { onFaultyNode(faultyNode); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index cdf331b7bb577..caed2b6eceb49 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -542,7 +542,7 @@ public String executor() { } public void sendClusterState(DiscoveryNode destination, ActionListener listener) { - logger.debug("sending cluster state over transport to node: {}", destination.getName()); + logger.trace("sending cluster state over transport to node: {}", destination.getName()); if (sendFullVersion || previousState.nodes().nodeExists(destination) == false) { logger.trace("sending full cluster state version [{}] to [{}]", newState.version(), destination); sendFullClusterState(destination, listener); diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java index 47080cfbde692..d0b6f812e9ee2 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterApplierService.java @@ -502,6 +502,7 @@ private void runTask(UpdateTask task) { try { applyChanges(task, previousClusterState, newClusterState, stopWatch); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); + // At this point, cluster state appliers and listeners are completed logger.debug( "processing [{}]: took [{}] done applying updated cluster state (version: {}, uuid: {})", task.source, @@ -510,6 +511,7 @@ private void runTask(UpdateTask task) { newClusterState.stateUUID() ); 
warnAboutSlowTaskIfNeeded(executionTime, task.source, stopWatch); + // Then we call the ClusterApplyListener of the task task.listener.onSuccess(task.source); } catch (Exception e) { TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, currentTimeInMillis() - startTimeMS)); @@ -578,6 +580,7 @@ private void applyChanges(UpdateTask task, ClusterState previousClusterState, Cl logger.debug("apply cluster state with version {}", newClusterState.version()); callClusterStateAppliers(clusterChangedEvent, stopWatch); + logger.debug("completed calling appliers of cluster state for version {}", newClusterState.version()); nodeConnectionsService.disconnectFromNodesExcept(newClusterState.nodes()); @@ -594,6 +597,7 @@ private void applyChanges(UpdateTask task, ClusterState previousClusterState, Cl state.set(newClusterState); callClusterStateListeners(clusterChangedEvent, stopWatch); + logger.debug("completed calling listeners of cluster state for version {}", newClusterState.version()); } protected void connectToNodesAndWait(ClusterState newClusterState) { diff --git a/server/src/main/java/org/opensearch/common/Table.java b/server/src/main/java/org/opensearch/common/Table.java index da14f628efa0f..133ec3052e6c9 100644 --- a/server/src/main/java/org/opensearch/common/Table.java +++ b/server/src/main/java/org/opensearch/common/Table.java @@ -34,6 +34,7 @@ import org.opensearch.common.time.DateFormatter; import org.opensearch.core.common.Strings; +import org.opensearch.rest.pagination.PageToken; import java.time.Instant; import java.time.ZoneOffset; @@ -59,9 +60,19 @@ public class Table { private List currentCells; private boolean inHeaders = false; private boolean withTime = false; + /** + * pageToken, if null, implies the Table response is not paginated.
+ */ + private PageToken pageToken; public static final String EPOCH = "epoch"; public static final String TIMESTAMP = "timestamp"; + public Table() {} + + public Table(@Nullable PageToken pageToken) { + this.pageToken = pageToken; + } + public Table startHeaders() { inHeaders = true; currentCells = new ArrayList<>(); @@ -230,6 +241,10 @@ public Map getAliasMap() { return headerAliasMap; } + public PageToken getPageToken() { + return pageToken; + } + /** * Cell in a table * diff --git a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java index 2f60fc8f69f87..95a8d9c9495f2 100644 --- a/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java +++ b/server/src/main/java/org/opensearch/common/xcontent/JsonToStringXContentParser.java @@ -9,6 +9,7 @@ package org.opensearch.common.xcontent; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.Strings; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.xcontent.AbstractXContentParser; @@ -73,7 +74,7 @@ public XContentParser parseObject() throws IOException { builder.startObject(); LinkedList path = new LinkedList<>(Collections.singleton(fieldTypeName)); while (currentToken() != Token.END_OBJECT) { - parseToken(path, null); + parseToken(path); } // deduplication the fieldName,valueList,valueAndPathList builder.field(this.fieldTypeName, new HashSet<>(keyList)); @@ -87,14 +88,11 @@ public XContentParser parseObject() throws IOException { /** * @return true if the child object contains no_null value, false otherwise */ - private boolean parseToken(Deque path, String currentFieldName) throws IOException { - if (path.size() == 1 && processNoNestedValue()) { - return true; - } + private boolean parseToken(Deque path) throws IOException { boolean isChildrenValueValid = false; boolean visitFieldName = false; if (this.parser.currentToken() == Token.FIELD_NAME) { - currentFieldName = this.parser.currentName(); + final String currentFieldName = this.parser.currentName(); path.addLast(currentFieldName); // Pushing onto the stack *must* be matched by pop visitFieldName = true; String parts = currentFieldName; @@ -106,23 +104,21 @@ private boolean parseToken(Deque path, String currentFieldName) throws I } this.keyList.add(parts); // parts has no dot, so either it's the original fieldName or it's the last part this.parser.nextToken(); // advance to the value of fieldName - isChildrenValueValid = parseToken(path, currentFieldName); // parse the value for fieldName (which will be an array, an object, - // or a primitive value) + isChildrenValueValid = parseToken(path); // parse the value for fieldName (which will be an array, an object, + // or a primitive value) path.removeLast(); // Here is where we pop fieldName from the stack (since we're done with the value of fieldName) // Note that whichever other branch we just passed through has already ended with nextToken(), so we // don't need to call it. 
} else if (this.parser.currentToken() == Token.START_ARRAY) { parser.nextToken(); while (this.parser.currentToken() != Token.END_ARRAY) { - isChildrenValueValid |= parseToken(path, currentFieldName); + isChildrenValueValid |= parseToken(path); } this.parser.nextToken(); - } else if (this.parser.currentToken() == Token.END_ARRAY) { - // skip } else if (this.parser.currentToken() == Token.START_OBJECT) { parser.nextToken(); while (this.parser.currentToken() != Token.END_OBJECT) { - isChildrenValueValid |= parseToken(path, currentFieldName); + isChildrenValueValid |= parseToken(path); } this.parser.nextToken(); } else { @@ -148,21 +144,6 @@ public void removeKeyOfNullValue() { this.keyList.remove(keyList.size() - 1); } - private boolean processNoNestedValue() throws IOException { - if (parser.currentToken() == Token.VALUE_NULL) { - return true; - } else if (this.parser.currentToken() == Token.VALUE_STRING - || this.parser.currentToken() == Token.VALUE_NUMBER - || this.parser.currentToken() == Token.VALUE_BOOLEAN) { - String value = this.parser.textOrNull(); - if (value != null) { - this.valueList.add(value); - } - return true; - } - return false; - } - private String parseValue() throws IOException { switch (this.parser.currentToken()) { case VALUE_BOOLEAN: @@ -172,7 +153,7 @@ private String parseValue() throws IOException { return this.parser.textOrNull(); // Handle other token types as needed default: - throw new IOException("Unsupported value token type [" + parser.currentToken() + "]"); + throw new ParsingException(parser.getTokenLocation(), "Unexpected value token type [" + parser.currentToken() + "]"); } } diff --git a/server/src/main/java/org/opensearch/discovery/Discovery.java b/server/src/main/java/org/opensearch/discovery/Discovery.java index 9d6807b6522c9..6d9fb1f4985df 100644 --- a/server/src/main/java/org/opensearch/discovery/Discovery.java +++ b/server/src/main/java/org/opensearch/discovery/Discovery.java @@ -32,6 +32,7 @@ package org.opensearch.discovery; +import org.opensearch.cluster.NodeConnectionsService; import org.opensearch.cluster.coordination.ClusterStatePublisher; import org.opensearch.common.lifecycle.LifecycleComponent; @@ -54,4 +55,8 @@ public interface Discovery extends LifecycleComponent, ClusterStatePublisher { */ void startInitialJoin(); + /** + * Sets the NodeConnectionsService which is an abstraction used for connection management + */ + void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService); } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java index df642a9f5a743..d86d41bb1a359 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardBatchFetch.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.logging.Loggers; import org.opensearch.core.index.shard.ShardId; @@ -48,7 +49,8 @@ public abstract class AsyncShardBatchFetch extend Class clazz, V emptyShardResponse, Predicate emptyShardResponsePredicate, - ShardBatchResponseFactory responseFactory + ShardBatchResponseFactory responseFactory, + ClusterManagerMetrics clusterManagerMetrics ) { super( logger, @@ -64,7 +66,8 @@ public abstract 
class AsyncShardBatchFetch extend clazz, emptyShardResponse, emptyShardResponsePredicate, - responseFactory + responseFactory, + clusterManagerMetrics ) ); } @@ -116,9 +119,10 @@ public ShardBatchCache( Class clazz, V emptyResponse, Predicate emptyShardResponsePredicate, - ShardBatchResponseFactory responseFactory + ShardBatchResponseFactory responseFactory, + ClusterManagerMetrics clusterManagerMetrics ) { - super(Loggers.getLogger(logger, "_" + logKey), type); + super(Loggers.getLogger(logger, "_" + logKey), type, clusterManagerMetrics); this.batchSize = shardAttributesMap.size(); this.emptyShardResponsePredicate = emptyShardResponsePredicate; cache = new HashMap<>(); diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java index b664dd573ce67..6017743ef2bd0 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetch.java @@ -35,6 +35,7 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -94,7 +95,8 @@ protected AsyncShardFetch( String type, ShardId shardId, String customDataPath, - Lister, T> action + Lister, T> action, + ClusterManagerMetrics clusterManagerMetrics ) { this.logger = logger; this.type = type; @@ -102,7 +104,7 @@ protected AsyncShardFetch( shardAttributesMap.put(shardId, new ShardAttributes(customDataPath)); this.action = (Lister, T>) action; this.reroutingKey = "ShardId=[" + shardId.toString() + "]"; - cache = new ShardCache<>(logger, reroutingKey, type); + cache = new ShardCache<>(logger, reroutingKey, type, clusterManagerMetrics); } /** @@ -284,8 +286,8 @@ static class ShardCache extends AsyncShardFetchCache private final Map> cache; - public ShardCache(Logger logger, String logKey, String type) { - super(Loggers.getLogger(logger, "_" + logKey), type); + public ShardCache(Logger logger, String logKey, String type, ClusterManagerMetrics clusterManagerMetrics) { + super(Loggers.getLogger(logger, "_" + logKey), type, clusterManagerMetrics); cache = new HashMap<>(); } diff --git a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java index 2a4e6181467b0..9b0a95f611e0e 100644 --- a/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java +++ b/server/src/main/java/org/opensearch/gateway/AsyncShardFetchCache.java @@ -14,6 +14,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; @@ -51,10 +52,12 @@ public abstract class AsyncShardFetchCache { private final Logger logger; private final String type; + private final ClusterManagerMetrics clusterManagerMetrics; - protected AsyncShardFetchCache(Logger logger, String type) { + protected AsyncShardFetchCache(Logger logger, String type, ClusterManagerMetrics clusterManagerMetrics) { 
this.logger = logger; this.type = type; + this.clusterManagerMetrics = clusterManagerMetrics; } abstract void initData(DiscoveryNode node); @@ -162,6 +165,7 @@ Map getCacheData(DiscoveryNodes nodes, Set failedNodes } void processResponses(List responses, long fetchingRound) { + clusterManagerMetrics.incrementCounter(clusterManagerMetrics.asyncFetchSuccessCounter, Double.valueOf(responses.size())); for (K response : responses) { BaseNodeEntry nodeEntry = getCache().get(response.getNode().getId()); if (nodeEntry != null) { @@ -222,6 +226,7 @@ boolean retryableException(Throwable unwrappedCause) { } void processFailures(List failures, long fetchingRound) { + clusterManagerMetrics.incrementCounter(clusterManagerMetrics.asyncFetchFailureCounter, Double.valueOf(failures.size())); for (FailedNodeException failure : failures) { logger.trace("processing failure {} for [{}]", failure, type); BaseNodeEntry nodeEntry = getCache().get(failure.nodeId()); diff --git a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java index c8ef9364ebba9..eaacb5dbfbd17 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayAllocator.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -92,11 +93,12 @@ public class GatewayAllocator implements ExistingShardsAllocator { public GatewayAllocator( RerouteService rerouteService, TransportNodesListGatewayStartedShards startedAction, - TransportNodesListShardStoreMetadata storeAction + TransportNodesListShardStoreMetadata storeAction, + ClusterManagerMetrics clusterManagerMetrics ) { this.rerouteService = rerouteService; - this.primaryShardAllocator = new InternalPrimaryShardAllocator(startedAction); - this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction); + this.primaryShardAllocator = new InternalPrimaryShardAllocator(startedAction, clusterManagerMetrics); + this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction, clusterManagerMetrics); } @Override @@ -251,9 +253,10 @@ class InternalAsyncFetch extends AsyncShardFetch String type, ShardId shardId, String customDataPath, - Lister, T> action + Lister, T> action, + ClusterManagerMetrics clusterManagerMetrics ) { - super(logger, type, shardId, customDataPath, action); + super(logger, type, shardId, customDataPath, action, clusterManagerMetrics); } @Override @@ -274,9 +277,11 @@ protected void reroute(String reroutingKey, String reason) { class InternalPrimaryShardAllocator extends PrimaryShardAllocator { private final TransportNodesListGatewayStartedShards startedAction; + private final ClusterManagerMetrics clusterManagerMetrics; - InternalPrimaryShardAllocator(TransportNodesListGatewayStartedShards startedAction) { + InternalPrimaryShardAllocator(TransportNodesListGatewayStartedShards startedAction, ClusterManagerMetrics clusterManagerMetrics) { this.startedAction = startedAction; + this.clusterManagerMetrics = clusterManagerMetrics; } @Override @@ -291,7 +296,8 @@ protected AsyncShardFetch.FetchResult shardState = fetch.fetchData( @@ -313,9 +319,11 @@ protected 
AsyncShardFetch.FetchResult shardStores = fetch.fetchData( diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index b3836edcd7d6c..f5da6df2689bd 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -66,13 +66,13 @@ import org.opensearch.gateway.remote.RemoteClusterStateService; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; import org.opensearch.index.recovery.RemoteStoreRestoreService; -import org.opensearch.index.recovery.RemoteStoreRestoreService.RemoteRestoreResult; import org.opensearch.node.Node; import org.opensearch.plugins.MetadataUpgrader; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; import java.io.Closeable; +import java.io.IOError; import java.io.IOException; import java.io.UncheckedIOException; import java.util.Collections; @@ -109,6 +109,8 @@ public class GatewayMetaState implements Closeable { */ public static final String STALE_STATE_CONFIG_NODE_ID = "STALE_STATE_CONFIG"; + private final Logger logger = LogManager.getLogger(GatewayMetaState.class); + private PersistedStateRegistry persistedStateRegistry; public PersistedState getPersistedState() { @@ -175,15 +177,11 @@ public void start( ); if (ClusterState.UNKNOWN_UUID.equals(lastKnownClusterUUID) == false) { // Load state from remote - final RemoteRestoreResult remoteRestoreResult = remoteStoreRestoreService.restore( - // Remote Metadata should always override local disk Metadata - // if local disk Metadata's cluster uuid is UNKNOWN_UUID - ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), - lastKnownClusterUUID, - false, - new String[] {} + clusterState = restoreClusterStateWithRetries( + remoteStoreRestoreService, + clusterState, + lastKnownClusterUUID ); - clusterState = remoteRestoreResult.getClusterState(); } } remotePersistedState = new RemotePersistedState(remoteClusterStateService, lastKnownClusterUUID); @@ -258,6 +256,50 @@ public void start( } } + private ClusterState restoreClusterStateWithRetries( + RemoteStoreRestoreService remoteStoreRestoreService, + ClusterState clusterState, + String lastKnownClusterUUID + ) { + int maxAttempts = 5; + int delayInMills = 200; + for (int attempt = 1; attempt <= maxAttempts; attempt++) { + try { + logger.info("Attempt {} to restore cluster state", attempt); + return restoreClusterState(remoteStoreRestoreService, clusterState, lastKnownClusterUUID); + } catch (Exception e) { + if (attempt == maxAttempts) { + // Throw an Error so that the process is halted. + throw new IOError(e); + } + try { + TimeUnit.MILLISECONDS.sleep(delayInMills); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); // Restore interrupted status + throw new RuntimeException(ie); + } + delayInMills = delayInMills * 2; + } + } + // This statement will never be reached. 
+ return null; + } + + ClusterState restoreClusterState( + RemoteStoreRestoreService remoteStoreRestoreService, + ClusterState clusterState, + String lastKnownClusterUUID + ) { + return remoteStoreRestoreService.restore( + // Remote Metadata should always override local disk Metadata + // if local disk Metadata's cluster uuid is UNKNOWN_UUID + ClusterState.builder(clusterState).metadata(Metadata.EMPTY_METADATA).build(), + lastKnownClusterUUID, + false, + new String[] {} + ).getClusterState(); + } + // exposed so it can be overridden by tests ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { assert clusterState.nodes().getLocalNode() == null : "prepareInitialClusterState must only be called once"; diff --git a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java index 5e2dcbcd70b40..d7c0a66ba3424 100644 --- a/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java +++ b/server/src/main/java/org/opensearch/gateway/ShardsBatchGatewayAllocator.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -44,6 +45,7 @@ import org.opensearch.indices.store.TransportNodesListShardStoreMetadataBatch.NodeStoreFilesMetadata; import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper; import org.opensearch.indices.store.TransportNodesListShardStoreMetadataHelper.StoreFilesMetadata; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import java.util.ArrayList; import java.util.Collections; @@ -81,6 +83,7 @@ public class ShardsBatchGatewayAllocator implements ExistingShardsAllocator { private TimeValue primaryShardsBatchGatewayAllocatorTimeout; private TimeValue replicaShardsBatchGatewayAllocatorTimeout; public static final TimeValue MIN_ALLOCATOR_TIMEOUT = TimeValue.timeValueSeconds(20); + private final ClusterManagerMetrics clusterManagerMetrics; /** * Number of shards we send in one batch to data nodes for fetching metadata @@ -160,7 +163,8 @@ public ShardsBatchGatewayAllocator( TransportNodesListGatewayStartedShardsBatch batchStartedAction, TransportNodesListShardStoreMetadataBatch batchStoreAction, Settings settings, - ClusterSettings clusterSettings + ClusterSettings clusterSettings, + ClusterManagerMetrics clusterManagerMetrics ) { this.rerouteService = rerouteService; this.primaryShardBatchAllocator = new InternalPrimaryBatchShardAllocator(); @@ -172,6 +176,7 @@ public ShardsBatchGatewayAllocator( clusterSettings.addSettingsUpdateConsumer(PRIMARY_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setPrimaryBatchAllocatorTimeout); this.replicaShardsBatchGatewayAllocatorTimeout = REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(REPLICA_BATCH_ALLOCATOR_TIMEOUT_SETTING, this::setReplicaBatchAllocatorTimeout); + this.clusterManagerMetrics = clusterManagerMetrics; } @Override @@ -187,6 +192,7 @@ protected ShardsBatchGatewayAllocator() { this(DEFAULT_SHARD_BATCH_SIZE, null); } + // for tests protected ShardsBatchGatewayAllocator(long batchSize, RerouteService rerouteService) 
{ this.rerouteService = rerouteService; this.batchStartedAction = null; @@ -196,10 +202,9 @@ protected ShardsBatchGatewayAllocator(long batchSize, RerouteService rerouteServ this.maxBatchSize = batchSize; this.primaryShardsBatchGatewayAllocatorTimeout = null; this.replicaShardsBatchGatewayAllocatorTimeout = null; + this.clusterManagerMetrics = new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE); } - // for tests - @Override public int getNumberOfInFlightFetches() { int count = 0; @@ -413,7 +418,7 @@ else if (shardRouting.primary() == primary) { // add to batch if batch size full or last shard in unassigned list if (batchSize == 0 || iterator.hasNext() == false) { String batchUUId = UUIDs.base64UUID(); - ShardsBatch shardsBatch = new ShardsBatch(batchUUId, perBatchShards, primary); + ShardsBatch shardsBatch = new ShardsBatch(batchUUId, perBatchShards, primary, clusterManagerMetrics); // add the batch to list of current batches addBatch(shardsBatch, primary); batchesToBeAssigned.add(batchUUId); @@ -588,9 +593,21 @@ class InternalBatchAsyncFetch extends AsyncShardB Class clazz, V emptyShardResponse, Predicate emptyShardResponsePredicate, - ShardBatchResponseFactory responseFactory + ShardBatchResponseFactory responseFactory, + ClusterManagerMetrics clusterManagerMetrics ) { - super(logger, type, map, action, batchUUId, clazz, emptyShardResponse, emptyShardResponsePredicate, responseFactory); + super( + logger, + type, + map, + action, + batchUUId, + clazz, + emptyShardResponse, + emptyShardResponsePredicate, + responseFactory, + clusterManagerMetrics + ); } @Override @@ -650,16 +667,17 @@ protected boolean hasInitiatedFetching(ShardRouting shard) { * It should return false if there has never been a fetch for this batch. * This function is currently only used in the case of replica shards when all deciders returned NO/THROTTLE, and explain mode is ON. * Allocation explain and manual reroute APIs try to append shard store information (matching bytes) to the allocation decision. - * However, these APIs do not want to trigger a new asyncFetch for these ineligible shards, unless the data from nodes is already there. + * However, these APIs do not want to trigger a new asyncFetch for these ineligible shards + * They only want to use the data if it is already available. * This function is used to see if a fetch has happened to decide if it is possible to append shard store info without a new async fetch. * In the case when shard has a batch but no fetch has happened before, it would be because it is a new batch. * In the case when shard has a batch, and a fetch has happened before, and no fetch is ongoing, it would be because we have already completed fetch for all nodes. - * + *
+ * <p>
* In order to check if a fetch has ever happened, we check 2 things: * 1. If the shard batch cache is empty, we know that fetch has never happened so we return false. * 2. If we see that the list of nodes to fetch from is empty, we know that all nodes have data or are ongoing a fetch. So we return true. * 3. Otherwise we return false. - * + *
+ * <p>
* see {@link AsyncShardFetchCache#findNodesToFetch()} */ String batchId = getBatchId(shard, shard.primary()); @@ -669,7 +687,8 @@ protected boolean hasInitiatedFetching(ShardRouting shard) { logger.trace("Checking if fetching done for batch id {}", batchId); ShardsBatch shardsBatch = shard.primary() ? batchIdToStartedShardBatch.get(batchId) : batchIdToStoreShardBatch.get(batchId); // if fetchData has never been called, the per node cache will be empty and have no nodes - // this is because cache.fillShardCacheWithDataNodes(nodes) initialises this map and is called in AsyncShardFetch.fetchData + /// this is because {@link AsyncShardFetchCache#fillShardCacheWithDataNodes(DiscoveryNodes)} initialises this map + /// and is called in {@link AsyncShardFetch#fetchData(DiscoveryNodes, Map)} if (shardsBatch == null || shardsBatch.getAsyncFetcher().hasEmptyCache()) { logger.trace("Batch cache is empty for batch {} ", batchId); return false; @@ -739,7 +758,12 @@ public class ShardsBatch { private final Map batchInfo; - public ShardsBatch(String batchId, Map shardsWithInfo, boolean primary) { + public ShardsBatch( + String batchId, + Map shardsWithInfo, + boolean primary, + ClusterManagerMetrics clusterManagerMetrics + ) { this.batchId = batchId; this.batchInfo = new HashMap<>(shardsWithInfo); // create a ShardId -> customDataPath map for async fetch @@ -757,7 +781,8 @@ public ShardsBatch(String batchId, Map shardsWithInfo, bool GatewayStartedShard.class, new GatewayStartedShard(null, false, null, null), GatewayStartedShard::isEmpty, - new ShardBatchResponseFactory<>(true) + new ShardBatchResponseFactory<>(true), + clusterManagerMetrics ); } else { asyncBatch = new InternalBatchAsyncFetch<>( @@ -769,7 +794,8 @@ public ShardsBatch(String batchId, Map shardsWithInfo, bool NodeStoreFilesMetadata.class, new NodeStoreFilesMetadata(new StoreFilesMetadata(null, Store.MetadataSnapshot.EMPTY, Collections.emptyList()), null), NodeStoreFilesMetadata::isEmpty, - new ShardBatchResponseFactory<>(false) + new ShardBatchResponseFactory<>(false), + clusterManagerMetrics ); } } diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateChecksum.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateChecksum.java index d6739c4572d1a..aa007f5da15b3 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateChecksum.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateChecksum.java @@ -12,8 +12,10 @@ import org.apache.logging.log4j.Logger; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.DiffableStringMap; +import org.opensearch.common.CheckedFunction; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.io.stream.BufferedChecksumStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -22,11 +24,15 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParseException; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.function.Consumer; import com.jcraft.jzlib.JZlib; @@ -37,6 +43,7 @@ */ public class 
ClusterStateChecksum implements ToXContentFragment, Writeable { + public static final int COMPONENT_SIZE = 11; static final String ROUTING_TABLE_CS = "routing_table"; static final String NODES_CS = "discovery_nodes"; static final String BLOCKS_CS = "blocks"; @@ -65,62 +72,103 @@ public class ClusterStateChecksum implements ToXContentFragment, Writeable { long indicesChecksum; long clusterStateChecksum; - public ClusterStateChecksum(ClusterState clusterState) { - try ( - BytesStreamOutput out = new BytesStreamOutput(); - BufferedChecksumStreamOutput checksumOut = new BufferedChecksumStreamOutput(out) - ) { - clusterState.routingTable().writeVerifiableTo(checksumOut); - routingTableChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - clusterState.nodes().writeVerifiableTo(checksumOut); - nodesChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - clusterState.coordinationMetadata().writeVerifiableTo(checksumOut); - coordinationMetadataChecksum = checksumOut.getChecksum(); - - // Settings create sortedMap by default, so no explicit sorting required here. - checksumOut.reset(); - Settings.writeSettingsToStream(clusterState.metadata().persistentSettings(), checksumOut); - settingMetadataChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - Settings.writeSettingsToStream(clusterState.metadata().transientSettings(), checksumOut); - transientSettingsMetadataChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - clusterState.metadata().templatesMetadata().writeVerifiableTo(checksumOut); - templatesMetadataChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - checksumOut.writeStringCollection(clusterState.metadata().customs().keySet()); - customMetadataMapChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - ((DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings()).writeTo(checksumOut); - hashesOfConsistentSettingsChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - checksumOut.writeMapValues( + public ClusterStateChecksum(ClusterState clusterState, ThreadPool threadpool) { + long start = threadpool.relativeTimeInNanos(); + ExecutorService executorService = threadpool.executor(ThreadPool.Names.REMOTE_STATE_CHECKSUM); + CountDownLatch latch = new CountDownLatch(COMPONENT_SIZE); + + executeChecksumTask((stream) -> { + clusterState.routingTable().writeVerifiableTo(stream); + return null; + }, checksum -> routingTableChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + clusterState.nodes().writeVerifiableTo(stream); + return null; + }, checksum -> nodesChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + clusterState.coordinationMetadata().writeVerifiableTo(stream); + return null; + }, checksum -> coordinationMetadataChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + Settings.writeSettingsToStream(clusterState.metadata().persistentSettings(), stream); + return null; + }, checksum -> settingMetadataChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + Settings.writeSettingsToStream(clusterState.metadata().transientSettings(), stream); + return null; + }, checksum -> transientSettingsMetadataChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + clusterState.metadata().templatesMetadata().writeVerifiableTo(stream); + return null; + }, checksum -> templatesMetadataChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + 
stream.writeStringCollection(clusterState.metadata().customs().keySet()); + return null; + }, checksum -> customMetadataMapChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + ((DiffableStringMap) clusterState.metadata().hashesOfConsistentSettings()).writeTo(stream); + return null; + }, checksum -> hashesOfConsistentSettingsChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + stream.writeMapValues( clusterState.metadata().indices(), - (stream, value) -> value.writeVerifiableTo((BufferedChecksumStreamOutput) stream) + (checksumStream, value) -> value.writeVerifiableTo((BufferedChecksumStreamOutput) checksumStream) ); - indicesChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - clusterState.blocks().writeVerifiableTo(checksumOut); - blocksChecksum = checksumOut.getChecksum(); - - checksumOut.reset(); - checksumOut.writeStringCollection(clusterState.customs().keySet()); - clusterStateCustomsChecksum = checksumOut.getChecksum(); - } catch (IOException e) { - logger.error("Failed to create checksum for cluster state.", e); + return null; + }, checksum -> indicesChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + clusterState.blocks().writeVerifiableTo(stream); + return null; + }, checksum -> blocksChecksum = checksum, executorService, latch); + + executeChecksumTask((stream) -> { + stream.writeStringCollection(clusterState.customs().keySet()); + return null; + }, checksum -> clusterStateCustomsChecksum = checksum, executorService, latch); + + try { + latch.await(); + } catch (InterruptedException e) { throw new RemoteStateTransferException("Failed to create checksum for cluster state.", e); } createClusterStateChecksum(); + logger.debug("Checksum execution time {}", TimeValue.nsecToMSec(threadpool.relativeTimeInNanos() - start)); + } + + private void executeChecksumTask( + CheckedFunction checksumTask, + Consumer checksumConsumer, + ExecutorService executorService, + CountDownLatch latch + ) { + executorService.execute(() -> { + try { + long checksum = createChecksum(checksumTask); + checksumConsumer.accept(checksum); + latch.countDown(); + } catch (IOException e) { + throw new RemoteStateTransferException("Failed to execute checksum task", e); + } + }); + } + + private long createChecksum(CheckedFunction task) throws IOException { + try ( + BytesStreamOutput out = new BytesStreamOutput(); + BufferedChecksumStreamOutput checksumOut = new BufferedChecksumStreamOutput(out) + ) { + task.apply(checksumOut); + return checksumOut.getChecksum(); + } } private void createClusterStateChecksum() { diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index ece29180f9cf5..ce5e57b79dadb 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -332,7 +332,9 @@ public RemoteClusterStateManifestInfo writeFullMetadata(ClusterState clusterStat uploadedMetadataResults, previousClusterUUID, clusterStateDiffManifest, - !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? new ClusterStateChecksum(clusterState) : null, + !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) + ? 
new ClusterStateChecksum(clusterState, threadpool) + : null, false, codecVersion ); @@ -539,7 +541,9 @@ public RemoteClusterStateManifestInfo writeIncrementalMetadata( uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), clusterStateDiffManifest, - !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? new ClusterStateChecksum(clusterState) : null, + !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) + ? new ClusterStateChecksum(clusterState, threadpool) + : null, false, previousManifest.getCodecVersion() ); @@ -1010,7 +1014,9 @@ public RemoteClusterStateManifestInfo markLastStateAsCommitted( uploadedMetadataResults, previousManifest.getPreviousClusterUUID(), previousManifest.getDiffManifest(), - !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) ? new ClusterStateChecksum(clusterState) : null, + !remoteClusterStateValidationMode.equals(RemoteClusterStateValidationMode.NONE) + ? new ClusterStateChecksum(clusterState, threadpool) + : null, true, previousManifest.getCodecVersion() ); @@ -1631,7 +1637,7 @@ void validateClusterStateFromChecksum( String localNodeId, boolean isFullStateDownload ) { - ClusterStateChecksum newClusterStateChecksum = new ClusterStateChecksum(clusterState); + ClusterStateChecksum newClusterStateChecksum = new ClusterStateChecksum(clusterState, threadpool); List failedValidation = newClusterStateChecksum.getMismatchEntities(manifest.getClusterStateChecksum()); if (failedValidation.isEmpty()) { return; diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index bf8f83e1b95df..738efcfafdca1 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -30,6 +30,7 @@ import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.search.AutomatonQueries; import org.opensearch.common.xcontent.JsonToStringXContentParser; +import org.opensearch.core.common.ParsingException; import org.opensearch.core.xcontent.DeprecationHandler; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; @@ -568,31 +569,41 @@ protected void parseCreateField(ParseContext context) throws IOException { if (context.externalValueSet()) { String value = context.externalValue().toString(); parseValueAddFields(context, value, fieldType().name()); - } else if (context.parser().currentToken() != XContentParser.Token.VALUE_NULL) { - JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.IGNORE_DEPRECATIONS, - context.parser(), - fieldType().name() - ); - /* - JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser - It reads the JSON object and parsed to a list of string - */ - XContentParser parser = jsonToStringParser.parseObject(); - - XContentParser.Token currentToken; - while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - switch (currentToken) { - case FIELD_NAME: - fieldName = parser.currentName(); - break; - case VALUE_STRING: - String value = parser.textOrNull(); - parseValueAddFields(context, value, fieldName); - break; + } else { + XContentParser ctxParser = context.parser(); + if (ctxParser.currentToken() != XContentParser.Token.VALUE_NULL) { + if 
(ctxParser.currentToken() != XContentParser.Token.START_OBJECT) { + throw new ParsingException( + ctxParser.getTokenLocation(), + "[" + this.name() + "] unexpected token [" + ctxParser.currentToken() + "] in flat_object field value" + ); } + JsonToStringXContentParser jsonToStringParser = new JsonToStringXContentParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.IGNORE_DEPRECATIONS, + ctxParser, + fieldType().name() + ); + /* + JsonToStringParser is the main parser class to transform JSON into stringFields in a XContentParser + It reads the JSON object and parsed to a list of string + */ + XContentParser parser = jsonToStringParser.parseObject(); + + XContentParser.Token currentToken; + while ((currentToken = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + switch (currentToken) { + case FIELD_NAME: + fieldName = parser.currentName(); + break; + case VALUE_STRING: + String value = parser.textOrNull(); + parseValueAddFields(context, value, fieldName); + break; + } + + } } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 72bf07d4b03b2..5be516166803e 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -854,7 +854,7 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException } // Check last fetch status of pinned timestamps. If stale, return. - if (RemoteStoreUtils.isPinnedTimestampStateStale()) { + if (lastNMetadataFilesToKeep != 0 && RemoteStoreUtils.isPinnedTimestampStateStale()) { logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); return; } @@ -994,7 +994,8 @@ public static void remoteDirectoryCleanup( String remoteStoreRepoForIndex, String indexUUID, ShardId shardId, - RemoteStorePathStrategy pathStrategy + RemoteStorePathStrategy pathStrategy, + boolean forceClean ) { try { RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) remoteDirectoryFactory.newDirectory( @@ -1003,8 +1004,12 @@ public static void remoteDirectoryCleanup( shardId, pathStrategy ); - remoteSegmentStoreDirectory.deleteStaleSegments(0); - remoteSegmentStoreDirectory.deleteIfEmpty(); + if (forceClean) { + remoteSegmentStoreDirectory.delete(); + } else { + remoteSegmentStoreDirectory.deleteStaleSegments(0); + remoteSegmentStoreDirectory.deleteIfEmpty(); + } } catch (Exception e) { staticLogger.error("Exception occurred while deleting directory", e); } @@ -1023,7 +1028,10 @@ private boolean deleteIfEmpty() throws IOException { logger.info("Remote directory still has files, not deleting the path"); return false; } + return delete(); + } + private boolean delete() { try { remoteDataDirectory.delete(); remoteMetadataDirectory.delete(); diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java index 27d34ec0d05af..ede422ea3c4f7 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslog.java @@ -34,6 +34,7 @@ import java.util.Optional; import java.util.Set; import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicLong; import java.util.function.BooleanSupplier; import java.util.function.LongConsumer; @@ -61,6 +62,7 @@ public class RemoteFsTimestampAwareTranslog extends RemoteFsTranslog { private final Map> oldFormatMetadataFileGenerationMap; private final Map> oldFormatMetadataFilePrimaryTermMap; private final AtomicLong minPrimaryTermInRemote = new AtomicLong(Long.MAX_VALUE); + private final AtomicBoolean triggerTrimOnMinRemoteGenReferencedChange = new AtomicBoolean(false); public RemoteFsTimestampAwareTranslog( TranslogConfig config, @@ -105,6 +107,11 @@ protected void onDelete() { } } + @Override + protected void onMinRemoteGenReferencedChange() { + triggerTrimOnMinRemoteGenReferencedChange.set(true); + } + @Override public void trimUnreferencedReaders() throws IOException { trimUnreferencedReaders(false, true); @@ -114,7 +121,7 @@ public void trimUnreferencedReaders() throws IOException { protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) throws IOException { if (trimLocal) { // clean up local translog files and updates readers - super.trimUnreferencedReaders(); + super.trimUnreferencedReaders(true); } // Update file tracker to reflect local translog state @@ -135,14 +142,22 @@ protected void trimUnreferencedReaders(boolean indexDeleted, boolean trimLocal) // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote // store. - if (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get()) { + if (indexDeleted == false && (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get())) { return; } // This is to fail fast and avoid listing md files un-necessarily. if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + logger.warn("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); + return; + } + + // This code block ensures parity with RemoteFsTranslog. Without this, we will end up making list translog metadata + // call in each invocation of trimUnreferencedReaders + if (indexDeleted == false && triggerTrimOnMinRemoteGenReferencedChange.get() == false) { return; + } else if (triggerTrimOnMinRemoteGenReferencedChange.get()) { + triggerTrimOnMinRemoteGenReferencedChange.set(false); } // Since remote generation deletion is async, this ensures that only one generation deletion happens at a time. @@ -158,7 +173,7 @@ public void onResponse(List blobMetadata) { List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); try { - if (metadataFiles.size() <= 1) { + if (indexDeleted == false && metadataFiles.size() <= 1) { logger.debug("No stale translog metadata files found"); remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); return; @@ -166,16 +181,12 @@ public void onResponse(List blobMetadata) { // Check last fetch status of pinned timestamps. If stale, return. 
if (indexDeleted == false && RemoteStoreUtils.isPinnedTimestampStateStale()) { - logger.warn("Skipping remote segment store garbage collection as last fetch of pinned timestamp is stale"); + logger.warn("Skipping remote translog garbage collection as last fetch of pinned timestamp is stale"); remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); return; } - List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted( - metadataFiles, - metadataFilePinnedTimestampMap, - logger - ); + List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles, indexDeleted); // If index is not deleted, make sure to keep latest metadata file if (indexDeleted == false) { @@ -194,10 +205,11 @@ public void onResponse(List blobMetadata) { metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); logger.debug(() -> "metadataFilesNotToBeDeleted = " + metadataFilesNotToBeDeleted); + Set generationsToBeDeleted = getGenerationsToBeDeleted( metadataFilesNotToBeDeleted, metadataFilesToBeDeleted, - indexDeleted + indexDeleted ? Long.MAX_VALUE : getMinGenerationToKeepInRemote() ); logger.debug(() -> "generationsToBeDeleted = " + generationsToBeDeleted); @@ -208,7 +220,11 @@ public void onResponse(List blobMetadata) { generationsToBeDeleted, remoteGenerationDeletionPermits::release ); + } else { + remoteGenerationDeletionPermits.release(); + } + if (metadataFilesToBeDeleted.isEmpty() == false) { // Delete stale metadata files translogTransferManager.deleteMetadataFilesAsync( metadataFilesToBeDeleted, @@ -217,11 +233,10 @@ public void onResponse(List blobMetadata) { // Update cache to keep only those metadata files that are not getting deleted oldFormatMetadataFileGenerationMap.keySet().retainAll(metadataFilesNotToBeDeleted); - // Delete stale primary terms deleteStaleRemotePrimaryTerms(metadataFilesNotToBeDeleted); } else { - remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); + remoteGenerationDeletionPermits.release(); } } catch (Exception e) { remoteGenerationDeletionPermits.release(REMOTE_DELETION_PERMITS); @@ -237,18 +252,16 @@ public void onFailure(Exception e) { translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); } + private long getMinGenerationToKeepInRemote() { + return minRemoteGenReferenced - indexSettings().getRemoteTranslogExtraKeep(); + } + // Visible for testing protected Set getGenerationsToBeDeleted( List metadataFilesNotToBeDeleted, List metadataFilesToBeDeleted, - boolean indexDeleted + long minGenerationToKeepInRemote ) throws IOException { - long maxGenerationToBeDeleted = Long.MAX_VALUE; - - if (indexDeleted == false) { - maxGenerationToBeDeleted = minRemoteGenReferenced - 1 - indexSettings().getRemoteTranslogExtraKeep(); - } - Set generationsFromMetadataFilesToBeDeleted = new HashSet<>(); for (String mdFile : metadataFilesToBeDeleted) { Tuple minMaxGen = getMinMaxTranslogGenerationFromMetadataFile(mdFile, translogTransferManager); @@ -262,21 +275,31 @@ protected Set getGenerationsToBeDeleted( Set generationsToBeDeleted = new HashSet<>(); for (long generation : generationsFromMetadataFilesToBeDeleted) { // Check if the generation is not referred by metadata file matching pinned timestamps - if (generation <= maxGenerationToBeDeleted && isGenerationPinned(generation, pinnedGenerations) == false) { + // The check with minGenerationToKeep is redundant but kept as to make sure we don't delete generations + // that are not persisted in remote segment store yet. 
+ if (generation < minGenerationToKeepInRemote && isGenerationPinned(generation, pinnedGenerations) == false) { generationsToBeDeleted.add(generation); } } return generationsToBeDeleted; } - protected List getMetadataFilesToBeDeleted(List metadataFiles) { - return getMetadataFilesToBeDeleted(metadataFiles, metadataFilePinnedTimestampMap, logger); + protected List getMetadataFilesToBeDeleted(List metadataFiles, boolean indexDeleted) { + return getMetadataFilesToBeDeleted( + metadataFiles, + metadataFilePinnedTimestampMap, + getMinGenerationToKeepInRemote(), + indexDeleted, + logger + ); } // Visible for testing protected static List getMetadataFilesToBeDeleted( List metadataFiles, Map metadataFilePinnedTimestampMap, + long minGenerationToKeepInRemote, + boolean indexDeleted, Logger logger ) { Tuple> pinnedTimestampsState = RemoteStorePinnedTimestampService.getPinnedTimestamps(); @@ -312,6 +335,22 @@ protected static List getMetadataFilesToBeDeleted( metadataFilesToBeDeleted.size() ); + if (indexDeleted == false) { + // Filter out metadata files based on minGenerationToKeep + List metadataFilesContainingMinGenerationToKeep = metadataFilesToBeDeleted.stream().filter(md -> { + long maxGeneration = TranslogTransferMetadata.getMaxGenerationFromFileName(md); + return maxGeneration == -1 || maxGeneration >= minGenerationToKeepInRemote; + }).collect(Collectors.toList()); + metadataFilesToBeDeleted.removeAll(metadataFilesContainingMinGenerationToKeep); + + logger.trace( + "metadataFilesContainingMinGenerationToKeep.size = {}, metadataFilesToBeDeleted based on minGenerationToKeep filtering = {}, minGenerationToKeep = {}", + metadataFilesContainingMinGenerationToKeep.size(), + metadataFilesToBeDeleted.size(), + minGenerationToKeepInRemote + ); + } + return metadataFilesToBeDeleted; } @@ -472,50 +511,60 @@ protected static Tuple getMinMaxPrimaryTermFromMetadataFile( } } - public static void cleanup(TranslogTransferManager translogTransferManager) throws IOException { - ActionListener> listMetadataFilesListener = new ActionListener<>() { - @Override - public void onResponse(List blobMetadata) { - List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); + public static void cleanupOfDeletedIndex(TranslogTransferManager translogTransferManager, boolean forceClean) throws IOException { + if (forceClean) { + translogTransferManager.delete(); + } else { + ActionListener> listMetadataFilesListener = new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + List metadataFiles = blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList()); + + try { + if (metadataFiles.isEmpty()) { + staticLogger.debug("No stale translog metadata files found"); + return; + } + List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + Long.MAX_VALUE, + true, // This method gets called when the index is no longer present + staticLogger + ); + if (metadataFilesToBeDeleted.isEmpty()) { + staticLogger.debug("No metadata files to delete"); + return; + } + staticLogger.debug(() -> "metadataFilesToBeDeleted = " + metadataFilesToBeDeleted); - try { - if (metadataFiles.isEmpty()) { - staticLogger.debug("No stale translog metadata files found"); - return; - } - List metadataFilesToBeDeleted = getMetadataFilesToBeDeleted(metadataFiles, new HashMap<>(), staticLogger); - if (metadataFilesToBeDeleted.isEmpty()) { - staticLogger.debug("No metadata files to delete"); - return; - } - staticLogger.debug(() -> 
"metadataFilesToBeDeleted = " + metadataFilesToBeDeleted); + // For all the files that we are keeping, fetch min and max generations + List metadataFilesNotToBeDeleted = new ArrayList<>(metadataFiles); + metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); + staticLogger.debug(() -> "metadataFilesNotToBeDeleted = " + metadataFilesNotToBeDeleted); - // For all the files that we are keeping, fetch min and max generations - List metadataFilesNotToBeDeleted = new ArrayList<>(metadataFiles); - metadataFilesNotToBeDeleted.removeAll(metadataFilesToBeDeleted); - staticLogger.debug(() -> "metadataFilesNotToBeDeleted = " + metadataFilesNotToBeDeleted); + // Delete stale metadata files + translogTransferManager.deleteMetadataFilesAsync(metadataFilesToBeDeleted, () -> {}); - // Delete stale metadata files - translogTransferManager.deleteMetadataFilesAsync(metadataFilesToBeDeleted, () -> {}); + // Delete stale primary terms + deleteStaleRemotePrimaryTerms( + metadataFilesNotToBeDeleted, + translogTransferManager, + new HashMap<>(), + new AtomicLong(Long.MAX_VALUE), + staticLogger + ); + } catch (Exception e) { + staticLogger.error("Exception while cleaning up metadata and primary terms", e); + } + } - // Delete stale primary terms - deleteStaleRemotePrimaryTerms( - metadataFilesNotToBeDeleted, - translogTransferManager, - new HashMap<>(), - new AtomicLong(Long.MAX_VALUE), - staticLogger - ); - } catch (Exception e) { + @Override + public void onFailure(Exception e) { staticLogger.error("Exception while cleaning up metadata and primary terms", e); } - } - - @Override - public void onFailure(Exception e) { - staticLogger.error("Exception while cleaning up metadata and primary terms", e); - } - }; - translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); + }; + translogTransferManager.listTranslogMetadataFilesAsync(listMetadataFilesListener); + } } } diff --git a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 812852d107682..f5a9ed8ed9362 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -78,6 +78,9 @@ public class RemoteFsTranslog extends Translog { // min generation referred by last uploaded translog protected volatile long minRemoteGenReferenced; + // the max global checkpoint that has been synced + protected volatile long globalCheckpointSynced; + // clean up translog folder uploaded by previous primaries once protected final SetOnce olderPrimaryCleaned = new SetOnce<>(); @@ -437,9 +440,10 @@ private boolean upload(long primaryTerm, long generation, long maxSeqNo) throws config.getNodeId() ).build() ) { + Checkpoint checkpoint = current.getLastSyncedCheckpoint(); return translogTransferManager.transferSnapshot( transferSnapshotProvider, - new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo) + new RemoteFsTranslogTransferListener(generation, primaryTerm, maxSeqNo, checkpoint.globalCheckpoint) ); } finally { syncPermit.release(SYNC_PERMIT); @@ -474,7 +478,10 @@ public void sync() throws IOException { public boolean syncNeeded() { try (ReleasableLock lock = readLock.acquire()) { return current.syncNeeded() - || (maxRemoteTranslogGenerationUploaded + 1 < this.currentFileGeneration() && current.totalOperations() == 0); + || (maxRemoteTranslogGenerationUploaded + 1 < this.currentFileGeneration() && current.totalOperations() == 0) 
+ // The below condition on GCP exists to handle global checkpoint updates during close index. + // Refer issue - https://github.com/opensearch-project/OpenSearch/issues/15989 + || (current.getLastSyncedCheckpoint().globalCheckpoint > globalCheckpointSynced); } } @@ -549,9 +556,17 @@ protected Releasable drainSync() { @Override public void trimUnreferencedReaders() throws IOException { + trimUnreferencedReaders(false); + } + + protected void trimUnreferencedReaders(boolean onlyTrimLocal) throws IOException { // clean up local translog files and updates readers super.trimUnreferencedReaders(); + if (onlyTrimLocal) { + return; + } + // This is to ensure that after the permits are acquired during primary relocation, there are no further modification on remote // store. if (startedPrimarySupplier.getAsBoolean() == false || pauseSync.get()) { @@ -674,21 +689,34 @@ private class RemoteFsTranslogTransferListener implements TranslogTransferListen private final long maxSeqNo; - RemoteFsTranslogTransferListener(long generation, long primaryTerm, long maxSeqNo) { + private final long globalCheckpoint; + + RemoteFsTranslogTransferListener(long generation, long primaryTerm, long maxSeqNo, long globalCheckpoint) { this.generation = generation; this.primaryTerm = primaryTerm; this.maxSeqNo = maxSeqNo; + this.globalCheckpoint = globalCheckpoint; } @Override public void onUploadComplete(TransferSnapshot transferSnapshot) throws IOException { maxRemoteTranslogGenerationUploaded = generation; + long previousMinRemoteGenReferenced = minRemoteGenReferenced; minRemoteGenReferenced = getMinFileGeneration(); + // Update the global checkpoint only if the supplied global checkpoint is greater than it + // When a new writer is created the + if (globalCheckpoint > globalCheckpointSynced) { + globalCheckpointSynced = globalCheckpoint; + } + if (previousMinRemoteGenReferenced != minRemoteGenReferenced) { + onMinRemoteGenReferencedChange(); + } logger.debug( - "Successfully uploaded translog for primary term = {}, generation = {}, maxSeqNo = {}", + "Successfully uploaded translog for primary term = {}, generation = {}, maxSeqNo = {}, minRemoteGenReferenced = {}", primaryTerm, generation, - maxSeqNo + maxSeqNo, + minRemoteGenReferenced ); } @@ -702,6 +730,10 @@ public void onUploadFailed(TransferSnapshot transferSnapshot, Exception ex) thro } } + protected void onMinRemoteGenReferencedChange() { + + } + @Override public long getMinUnreferencedSeqNoInSegments(long minUnrefCheckpointInLastCommit) { return minSeqNoToKeep; diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java index 3b8885055e8f7..7fe3305545085 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferMetadata.java @@ -170,6 +170,16 @@ public static Tuple getMinMaxTranslogGenerationFromFilename(String f } } + public static long getMaxGenerationFromFileName(String filename) { + String[] tokens = filename.split(METADATA_SEPARATOR); + try { + return RemoteStoreUtils.invertLong(tokens[2]); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("Exception while getting max generation from: {}", filename), e); + return -1; + } + } + public static Tuple getMinMaxPrimaryTermFromFilename(String filename) { String[] tokens = filename.split(METADATA_SEPARATOR); if (tokens.length < 7) { diff 
--git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 2d8a1af383fb0..56d04df5921ee 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -1598,6 +1598,7 @@ public Node start() throws NodeValidationException { injector.getInstance(GatewayService.class).start(); Discovery discovery = injector.getInstance(Discovery.class); + discovery.setNodeConnectionsService(nodeConnectionsService); clusterService.getClusterManagerService().setClusterStatePublisher(discovery::publish); // Start the transport service now so the publish address will be added to the local disco node in ClusterService diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 1eb38ea63ad5a..9671fda14375d 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -54,6 +54,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.MonitorService; +import org.opensearch.node.remotestore.RemoteStoreNodeStats; import org.opensearch.plugins.PluginsService; import org.opensearch.ratelimitting.admissioncontrol.AdmissionControlService; import org.opensearch.repositories.RepositoriesService; @@ -241,7 +242,8 @@ public NodeStats stats( boolean segmentReplicationTrackerStats, boolean repositoriesStats, boolean admissionControl, - boolean cacheService + boolean cacheService, + boolean remoteStoreNodeStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -274,7 +276,8 @@ public NodeStats stats( segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null, repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null, admissionControl ? this.admissionControlService.stats() : null, - cacheService ? this.cacheService.stats(indices) : null + cacheService ? this.cacheService.stats(indices) : null, + remoteStoreNodeStats ? new RemoteStoreNodeStats() : null ); } diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeStats.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeStats.java new file mode 100644 index 0000000000000..8da8a17e21839 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeStats.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.remotestore; + +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Node level remote store stats + * @opensearch.internal + */ +public class RemoteStoreNodeStats implements Writeable, ToXContentFragment { + + public static final String STATS_NAME = "remote_store"; + public static final String LAST_SUCCESSFUL_FETCH_OF_PINNED_TIMESTAMPS = "last_successful_fetch_of_pinned_timestamps"; + + /** + * Time stamp for the last successful fetch of pinned timestamps by the {@linkplain RemoteStorePinnedTimestampService} + */ + private final long lastSuccessfulFetchOfPinnedTimestamps; + + public RemoteStoreNodeStats() { + this.lastSuccessfulFetchOfPinnedTimestamps = RemoteStorePinnedTimestampService.getPinnedTimestamps().v1(); + } + + public long getLastSuccessfulFetchOfPinnedTimestamps() { + return this.lastSuccessfulFetchOfPinnedTimestamps; + } + + public RemoteStoreNodeStats(StreamInput in) throws IOException { + this.lastSuccessfulFetchOfPinnedTimestamps = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(this.lastSuccessfulFetchOfPinnedTimestamps); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(STATS_NAME); + builder.field(LAST_SUCCESSFUL_FETCH_OF_PINNED_TIMESTAMPS, this.lastSuccessfulFetchOfPinnedTimestamps); + return builder.endObject(); + } + + @Override + public String toString() { + return "RemoteStoreNodeStats{ lastSuccessfulFetchOfPinnedTimestamps=" + lastSuccessfulFetchOfPinnedTimestamps + "}"; + } + + @Override + public boolean equals(Object o) { + if (o == null) { + return false; + } + if (o.getClass() != RemoteStoreNodeStats.class) { + return false; + } + RemoteStoreNodeStats other = (RemoteStoreNodeStats) o; + return this.lastSuccessfulFetchOfPinnedTimestamps == other.lastSuccessfulFetchOfPinnedTimestamps; + } + + @Override + public int hashCode() { + return Objects.hash(lastSuccessfulFetchOfPinnedTimestamps); + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java index 71133615ed056..1448c46583f6a 100644 --- a/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStorePinnedTimestampService.java @@ -240,6 +240,10 @@ public void unpinTimestamp(long timestamp, String pinningEntity, ActionListener< } } + public void forceSyncPinnedTimestamps() { + asyncUpdatePinnedTimestampTask.run(); + } + @Override public void close() throws IOException { asyncUpdatePinnedTimestampTask.close(); diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index d4520beb5b570..138bc13140aea 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -180,7 +180,7 @@ void finalizeSnapshot( * @param repositoryUpdatePriority priority for the cluster state update task * @param listener listener to be invoked 
with the new {@link RepositoryData} after completing the snapshot */ - void finalizeSnapshot( + default void finalizeSnapshot( ShardGenerations shardGenerations, long repositoryStateId, Metadata clusterMetadata, @@ -189,7 +189,9 @@ void finalizeSnapshot( Function stateTransformer, Priority repositoryUpdatePriority, ActionListener listener - ); + ) { + throw new UnsupportedOperationException(); + } /** * Deletes snapshots diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 0292cecc36a81..14c201e819994 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -199,7 +199,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; -import static org.opensearch.snapshots.SnapshotsService.SNAPSHOT_PINNED_TIMESTAMP_DELIMITER; /** * BlobStore - based implementation of Snapshot Repository @@ -1309,7 +1308,7 @@ private void cleanUpRemoteStoreFilesForDeletedIndicesV2( } // iterate through all the indices and trigger remote store directory cleanup for deleted index segments for (String indexId : uniqueIndexIds) { - cleanRemoteStoreDirectoryIfNeeded(snapshotIds, indexId, repositoryData, remoteSegmentStoreDirectoryFactory); + cleanRemoteStoreDirectoryIfNeeded(snapshotIds, indexId, repositoryData, remoteSegmentStoreDirectoryFactory, false); } afterCleanupsListener.onResponse(null); } catch (Exception e) { @@ -1357,11 +1356,17 @@ private void removeSnapshotPinnedTimestamp( ) { remoteStorePinnedTimestampService.unpinTimestamp( timestampToUnpin, - repository + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshotId.getUUID(), + SnapshotsService.getPinningEntity(repository, snapshotId.getUUID()), new ActionListener() { @Override public void onResponse(Void unused) { - logger.debug("Timestamp {} unpinned successfully for snapshot {}", timestampToUnpin, snapshotId.getName()); + logger.info("Timestamp {} unpinned successfully for snapshot {}", timestampToUnpin, snapshotId.getName()); + try { + remoteStorePinnedTimestampService.forceSyncPinnedTimestamps(); + logger.debug("Successfully synced pinned timestamp state"); + } catch (Exception e) { + logger.warn("Exception while updating pinning timestamp state, snapshot deletion will continue", e); + } listener.onResponse(null); } @@ -1466,7 +1471,8 @@ public static void remoteDirectoryCleanupAsync( String indexUUID, ShardId shardId, String threadPoolName, - RemoteStorePathStrategy pathStrategy + RemoteStorePathStrategy pathStrategy, + boolean forceClean ) { threadpool.executor(threadPoolName) .execute( @@ -1476,7 +1482,8 @@ public static void remoteDirectoryCleanupAsync( remoteStoreRepoForIndex, indexUUID, shardId, - pathStrategy + pathStrategy, + forceClean ), indexUUID, shardId @@ -1532,7 +1539,8 @@ protected void releaseRemoteStoreLockAndCleanup( indexUUID, new ShardId(Index.UNKNOWN_INDEX_NAME, indexUUID, Integer.parseInt(shardId)), ThreadPool.Names.REMOTE_PURGE, - remoteStoreShardShallowCopySnapshot.getRemoteStorePathStrategy() + remoteStoreShardShallowCopySnapshot.getRemoteStorePathStrategy(), + false ); } } @@ -2095,7 +2103,7 @@ private void 
executeOneStaleIndexDelete( deleteResult = deleteResult.add(cleanUpStaleSnapshotShardPathsFile(matchingShardPaths, snapshotShardPaths)); if (remoteSegmentStoreDirectoryFactory != null) { - cleanRemoteStoreDirectoryIfNeeded(deletedSnapshots, indexSnId, oldRepoData, remoteSegmentStoreDirectoryFactory); + cleanRemoteStoreDirectoryIfNeeded(deletedSnapshots, indexSnId, oldRepoData, remoteSegmentStoreDirectoryFactory, true); } // Finally, we delete the [base_path]/indexId folder @@ -2167,7 +2175,8 @@ private void cleanRemoteStoreDirectoryIfNeeded( Collection deletedSnapshots, String indexSnId, RepositoryData oldRepoData, - RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory + RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory, + boolean forceClean ) { assert (indexSnId != null); @@ -2219,9 +2228,16 @@ private void cleanRemoteStoreDirectoryIfNeeded( prevIndexMetadata.getIndexUUID(), shard, ThreadPool.Names.REMOTE_PURGE, - remoteStorePathStrategy + remoteStorePathStrategy, + forceClean + ); + remoteTranslogCleanupAsync( + remoteTranslogRepository, + shard, + remoteStorePathStrategy, + prevIndexMetadata, + forceClean ); - remoteTranslogCleanupAsync(remoteTranslogRepository, shard, remoteStorePathStrategy, prevIndexMetadata); } } } catch (Exception e) { @@ -2245,7 +2261,8 @@ private void remoteTranslogCleanupAsync( Repository remoteTranslogRepository, ShardId shardId, RemoteStorePathStrategy remoteStorePathStrategy, - IndexMetadata prevIndexMetadata + IndexMetadata prevIndexMetadata, + boolean forceClean ) { assert remoteTranslogRepository instanceof BlobStoreRepository; boolean indexMetadataEnabled = RemoteStoreUtils.determineTranslogMetadataEnabled(prevIndexMetadata); @@ -2262,7 +2279,7 @@ private void remoteTranslogCleanupAsync( indexMetadataEnabled ); try { - RemoteFsTimestampAwareTranslog.cleanup(translogTransferManager); + RemoteFsTimestampAwareTranslog.cleanupOfDeletedIndex(translogTransferManager, forceClean); } catch (IOException e) { logger.error("Exception while cleaning up remote translog for shard: " + shardId, e); } diff --git a/server/src/main/java/org/opensearch/rest/RestHandler.java b/server/src/main/java/org/opensearch/rest/RestHandler.java index 1139e5fc65f31..143cbd472ed07 100644 --- a/server/src/main/java/org/opensearch/rest/RestHandler.java +++ b/server/src/main/java/org/opensearch/rest/RestHandler.java @@ -125,6 +125,13 @@ default boolean allowSystemIndexAccessByDefault() { return false; } + /** + * Denotes whether the RestHandler will output paginated responses or not. 
+ */ + default boolean isActionPaginated() { + return false; + } + static RestHandler wrapper(RestHandler delegate) { return new Wrapper(delegate); } diff --git a/server/src/main/java/org/opensearch/rest/RestRequest.java b/server/src/main/java/org/opensearch/rest/RestRequest.java index 2c397f7fc6e8e..f241b567c3204 100644 --- a/server/src/main/java/org/opensearch/rest/RestRequest.java +++ b/server/src/main/java/org/opensearch/rest/RestRequest.java @@ -51,6 +51,7 @@ import org.opensearch.core.xcontent.XContentParser; import org.opensearch.http.HttpChannel; import org.opensearch.http.HttpRequest; +import org.opensearch.rest.pagination.PageParams; import java.io.IOException; import java.io.InputStream; @@ -67,6 +68,9 @@ import static org.opensearch.common.unit.TimeValue.parseTimeValue; import static org.opensearch.core.common.unit.ByteSizeValue.parseBytesSizeValue; +import static org.opensearch.rest.pagination.PageParams.PARAM_NEXT_TOKEN; +import static org.opensearch.rest.pagination.PageParams.PARAM_SIZE; +import static org.opensearch.rest.pagination.PageParams.PARAM_SORT; /** * REST Request @@ -591,6 +595,10 @@ public static MediaType parseContentType(List header) { throw new IllegalArgumentException("empty Content-Type header"); } + public PageParams parsePaginatedQueryParams(String defaultSortOrder, int defaultPageSize) { + return new PageParams(param(PARAM_NEXT_TOKEN), param(PARAM_SORT, defaultSortOrder), paramAsInt(PARAM_SIZE, defaultPageSize)); + } + /** * Thrown if there is an error in the content type header. * diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 9dc711f804144..1e76008ff8c64 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -50,6 +50,7 @@ import org.opensearch.cluster.health.ClusterIndexHealth; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Table; +import org.opensearch.common.collect.Tuple; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; @@ -61,6 +62,9 @@ import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestResponse; import org.opensearch.rest.action.RestResponseListener; +import org.opensearch.rest.action.list.AbstractListAction; +import org.opensearch.rest.pagination.IndexPaginationStrategy; +import org.opensearch.rest.pagination.PageToken; import java.time.Instant; import java.time.ZoneOffset; @@ -68,9 +72,11 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.Spliterators; import java.util.function.Function; @@ -87,7 +93,7 @@ * * @opensearch.api */ -public class RestIndicesAction extends AbstractCatAction { +public class RestIndicesAction extends AbstractListAction { private static final DateFormatter STRICT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_time"); private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestIndicesAction.class); @@ -151,48 +157,70 @@ public RestResponse buildResponse(final Table table) throws Exception { new ActionListener() { @Override public void onResponse(final GetSettingsResponse 
getSettingsResponse) { - final GroupedActionListener groupedListener = createGroupedListener(request, 4, listener); - groupedListener.onResponse(getSettingsResponse); - // The list of indices that will be returned is determined by the indices returned from the Get Settings call. // All the other requests just provide additional detail, and wildcards may be resolved differently depending on the // type of request in the presence of security plugins (looking at you, ClusterHealthRequest), so // force the IndicesOptions for all the sub-requests to be as inclusive as possible. final IndicesOptions subRequestIndicesOptions = IndicesOptions.lenientExpandHidden(); - // Indices that were successfully resolved during the get settings request might be deleted when the subsequent - // cluster - // state, cluster health and indices stats requests execute. We have to distinguish two cases: - // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we want the - // subsequent requests to fail. - // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent requests - // not to - // fail on the deleted index (as we want to ignore wildcards that cannot be resolved). - // This behavior can be ensured by letting the cluster state, cluster health and indices stats requests re-resolve - // the - // index names with the same indices options that we used for the initial cluster state request (strictExpand). - sendIndicesStatsRequest( - indices, - subRequestIndicesOptions, - includeUnloadedSegments, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); + // Indices that were successfully resolved during the get settings request might be deleted when the + // subsequent cluster state, cluster health and indices stats requests execute. We have to distinguish two cases: + // 1) the deleted index was explicitly passed as parameter to the /_cat/indices request. In this case we + // want the subsequent requests to fail. + // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent + // requests not to fail on the deleted index (as we want to ignore wildcards that cannot be resolved). + // This behavior can be ensured by letting the cluster state, cluster health and indices stats requests + // re-resolve the index names with the same indices options that we used for the initial cluster state + // request (strictExpand). sendClusterStateRequest( indices, subRequestIndicesOptions, local, clusterManagerNodeTimeout, client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) - ); - sendClusterHealthRequest( - indices, - subRequestIndicesOptions, - local, - clusterManagerNodeTimeout, - client, - ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + new ActionListener() { + @Override + public void onResponse(ClusterStateResponse clusterStateResponse) { + IndexPaginationStrategy paginationStrategy = getPaginationStrategy(clusterStateResponse); + // For non-paginated queries, indicesToBeQueried would be same as indices retrieved from + // rest request and unresolved, while for paginated queries, it would be a list of indices + // already resolved by ClusterStateRequest and to be displayed in a page. + final String[] indicesToBeQueried = Objects.isNull(paginationStrategy) + ? 
indices + : paginationStrategy.getRequestedEntities().toArray(new String[0]); + final GroupedActionListener groupedListener = createGroupedListener( + request, + 4, + listener, + indicesToBeQueried, + Objects.isNull(paginationStrategy) ? null : paginationStrategy.getResponseToken() + ); + groupedListener.onResponse(getSettingsResponse); + groupedListener.onResponse(clusterStateResponse); + + sendIndicesStatsRequest( + indicesToBeQueried, + subRequestIndicesOptions, + includeUnloadedSegments, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + + sendClusterHealthRequest( + indicesToBeQueried, + subRequestIndicesOptions, + local, + clusterManagerNodeTimeout, + client, + ActionListener.wrap(groupedListener::onResponse, groupedListener::onFailure) + ); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + } ); } @@ -203,6 +231,7 @@ public void onFailure(final Exception e) { } ); }; + } /** @@ -287,7 +316,9 @@ private void sendIndicesStatsRequest( private GroupedActionListener createGroupedListener( final RestRequest request, final int size, - final ActionListener listener + final ActionListener
listener, + final String[] indicesToBeQueried, + final PageToken pageToken ) { return new GroupedActionListener<>(new ActionListener>() { @Override @@ -311,7 +342,15 @@ public void onResponse(final Collection responses) { IndicesStatsResponse statsResponse = extractResponse(responses, IndicesStatsResponse.class); Map indicesStats = statsResponse.getIndices(); - Table responseTable = buildTable(request, indicesSettings, indicesHealths, indicesStats, indicesStates); + Table responseTable = buildTable( + request, + indicesSettings, + indicesHealths, + indicesStats, + indicesStates, + getTableIterator(indicesToBeQueried, indicesSettings), + pageToken + ); listener.onResponse(responseTable); } catch (Exception e) { onFailure(e); @@ -340,7 +379,11 @@ protected Set responseParams() { @Override protected Table getTableWithHeader(final RestRequest request) { - Table table = new Table(); + return getTableWithHeader(request, null); + } + + protected Table getTableWithHeader(final RestRequest request, final PageToken pageToken) { + Table table = new Table(pageToken); table.startHeaders(); table.addCell("health", "alias:h;desc:current health status"); table.addCell("status", "alias:s;desc:open/close status"); @@ -704,22 +747,27 @@ protected Table getTableWithHeader(final RestRequest request) { } // package private for testing - Table buildTable( + protected Table buildTable( final RestRequest request, final Map indicesSettings, final Map indicesHealths, final Map indicesStats, - final Map indicesMetadatas + final Map indicesMetadatas, + final Iterator> tableIterator, + final PageToken pageToken ) { - final String healthParam = request.param("health"); - final Table table = getTableWithHeader(request); + final Table table = getTableWithHeader(request, pageToken); + + while (tableIterator.hasNext()) { + final Tuple tuple = tableIterator.next(); + String indexName = tuple.v1(); + Settings settings = tuple.v2(); - indicesSettings.forEach((indexName, settings) -> { if (indicesMetadatas.containsKey(indexName) == false) { // the index exists in the Get Indices response but is not present in the cluster state: // it is likely that the index was deleted in the meanwhile, so we ignore it. - return; + continue; } final IndexMetadata indexMetadata = indicesMetadatas.get(indexName); @@ -748,7 +796,7 @@ Table buildTable( skip = ClusterHealthStatus.RED != healthStatusFilter; } if (skip) { - return; + continue; } } @@ -982,7 +1030,8 @@ Table buildTable( table.addCell(searchThrottled); table.endRow(); - }); + + } return table; } @@ -991,4 +1040,34 @@ Table buildTable( private static A extractResponse(final Collection responses, Class c) { return (A) responses.stream().filter(c::isInstance).findFirst().get(); } + + @Override + public boolean isActionPaginated() { + return false; + } + + protected IndexPaginationStrategy getPaginationStrategy(ClusterStateResponse clusterStateResponse) { + return null; + } + + /** + * Provides the iterator to be used for building the response table. 
+ */ + protected Iterator> getTableIterator(String[] indices, Map indexSettingsMap) { + return new Iterator<>() { + final Iterator settingsMapIter = indexSettingsMap.keySet().iterator(); + + @Override + public boolean hasNext() { + return settingsMapIter.hasNext(); + } + + @Override + public Tuple next() { + String index = settingsMapIter.next(); + return new Tuple<>(index, indexSettingsMap.get(index)); + } + }; + } + } diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java index 4f1090b163ee6..d622dd7a956f4 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestTable.java @@ -58,8 +58,11 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; +import static org.opensearch.rest.pagination.PageToken.PAGINATED_RESPONSE_NEXT_TOKEN_KEY; + /** * a REST table * @@ -87,8 +90,37 @@ public static RestResponse buildXContentBuilder(Table table, RestChannel channel RestRequest request = channel.request(); XContentBuilder builder = channel.newBuilder(); List displayHeaders = buildDisplayHeaders(table, request); + if (Objects.nonNull(table.getPageToken())) { + buildPaginatedXContentBuilder(table, request, builder, displayHeaders); + } else { + builder.startArray(); + addRowsToXContentBuilder(table, request, builder, displayHeaders); + builder.endArray(); + } + return new BytesRestResponse(RestStatus.OK, builder); + } + + private static void buildPaginatedXContentBuilder( + Table table, + RestRequest request, + XContentBuilder builder, + List displayHeaders + ) throws Exception { + assert Objects.nonNull(table.getPageToken().getPaginatedEntity()) : "Paginated element is required in-case of paginated responses"; + builder.startObject(); + builder.field(PAGINATED_RESPONSE_NEXT_TOKEN_KEY, table.getPageToken().getNextToken()); + builder.startArray(table.getPageToken().getPaginatedEntity()); + addRowsToXContentBuilder(table, request, builder, displayHeaders); + builder.endArray(); + builder.endObject(); + } - builder.startArray(); + private static void addRowsToXContentBuilder( + Table table, + RestRequest request, + XContentBuilder builder, + List displayHeaders + ) throws Exception { List rowOrder = getRowOrder(table, request); for (Integer row : rowOrder) { builder.startObject(); @@ -97,8 +129,6 @@ public static RestResponse buildXContentBuilder(Table table, RestChannel channel } builder.endObject(); } - builder.endArray(); - return new BytesRestResponse(RestStatus.OK, builder); } public static RestResponse buildTextPlainResponse(Table table, RestChannel channel) throws IOException { @@ -136,6 +166,11 @@ public static RestResponse buildTextPlainResponse(Table table, RestChannel chann } out.append("\n"); } + // Adding a new row for next_token, in the response if the table is paginated. 
+ if (Objects.nonNull(table.getPageToken())) { + out.append("next_token" + " " + table.getPageToken().getNextToken()); + out.append("\n"); + } out.close(); return new BytesRestResponse(RestStatus.OK, BytesRestResponse.TEXT_CONTENT_TYPE, bytesOut.bytes()); } diff --git a/server/src/main/java/org/opensearch/rest/action/list/AbstractListAction.java b/server/src/main/java/org/opensearch/rest/action/list/AbstractListAction.java new file mode 100644 index 0000000000000..f3d6d6653a550 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/list/AbstractListAction.java @@ -0,0 +1,77 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.list; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.cat.AbstractCatAction; +import org.opensearch.rest.pagination.PageParams; + +import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.rest.pagination.PageParams.PARAM_ASC_SORT_VALUE; +import static org.opensearch.rest.pagination.PageParams.PARAM_DESC_SORT_VALUE; + +/** + * Base Transport action class for _list API. + * Serves as a base class for APIs wanting to support pagination. + * Existing _cat APIs can refer {@link org.opensearch.rest.action.cat.RestIndicesAction}. + * @opensearch.api + */ +public abstract class AbstractListAction extends AbstractCatAction { + + private static final int DEFAULT_PAGE_SIZE = 100; + protected PageParams pageParams; + + protected abstract void documentation(StringBuilder sb); + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + boolean helpWanted = request.paramAsBoolean("help", false); + if (helpWanted || isActionPaginated() == false) { + return super.prepareRequest(request, client); + } + this.pageParams = validateAndGetPageParams(request); + assert Objects.nonNull(pageParams) : "pageParams can not be null for paginated queries"; + return doCatRequest(request, client); + } + + @Override + public boolean isActionPaginated() { + return true; + } + + /** + * + * @return Metadata that can be extracted out from the rest request. Query params supported by the action specific + * to pagination along with any respective validations to be added here. 
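The validation performed here boils down to two checks on the parsed PageParams: a positive page size and a sort value of either asc or desc. A self-contained sketch of the same checks, using only the PageParams class added in this change (the surrounding request-parsing plumbing is assumed and omitted):

import org.opensearch.rest.pagination.PageParams;

public class PageParamsValidationSketch {

    // Mirrors the checks in AbstractListAction#validateAndGetPageParams.
    static PageParams validate(PageParams pageParams) {
        if (pageParams.getSize() <= 0) {
            throw new IllegalArgumentException("size must be greater than zero");
        }
        if (!("asc".equals(pageParams.getSort()) || "desc".equals(pageParams.getSort()))) {
            throw new IllegalArgumentException("value of sort can either be asc or desc");
        }
        return pageParams;
    }

    public static void main(String[] args) {
        // Defaults from AbstractListAction: ascending sort, page size 100, no token yet.
        PageParams firstPage = new PageParams(null, "asc", 100);
        validate(firstPage);

        // An invalid request (size = 0) is rejected before any cluster state is read.
        try {
            validate(new PageParams(null, "asc", 0));
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}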
+ */ + protected PageParams validateAndGetPageParams(RestRequest restRequest) { + PageParams pageParams = restRequest.parsePaginatedQueryParams(defaultSort(), defaultPageSize()); + // validating pageSize + if (pageParams.getSize() <= 0) { + throw new IllegalArgumentException("size must be greater than zero"); + } + // Validating sort order + if (!(PARAM_ASC_SORT_VALUE.equals(pageParams.getSort()) || PARAM_DESC_SORT_VALUE.equals(pageParams.getSort()))) { + throw new IllegalArgumentException("value of sort can either be asc or desc"); + } + return pageParams; + } + + protected int defaultPageSize() { + return DEFAULT_PAGE_SIZE; + } + + protected String defaultSort() { + return PARAM_ASC_SORT_VALUE; + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/list/RestIndicesListAction.java b/server/src/main/java/org/opensearch/rest/action/list/RestIndicesListAction.java new file mode 100644 index 0000000000000..ad5c58c86ce90 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/list/RestIndicesListAction.java @@ -0,0 +1,104 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.list; + +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.cat.RestIndicesAction; +import org.opensearch.rest.pagination.IndexPaginationStrategy; +import org.opensearch.rest.pagination.PageParams; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * _list API action to output indices in pages. + * + * @opensearch.api + */ +public class RestIndicesListAction extends RestIndicesAction { + + private static final int MAX_SUPPORTED_LIST_INDICES_PAGE_SIZE = 5000; + private static final int DEFAULT_LIST_INDICES_PAGE_SIZE = 500; + + @Override + public List routes() { + return unmodifiableList(asList(new Route(GET, "/_list/indices"), new Route(GET, "/_list/indices/{index}"))); + } + + @Override + public String getName() { + return "list_indices_action"; + } + + @Override + protected void documentation(StringBuilder sb) { + sb.append("/_list/indices\n"); + sb.append("/_list/indices/{index}\n"); + } + + @Override + public boolean isActionPaginated() { + return true; + } + + @Override + protected PageParams validateAndGetPageParams(RestRequest restRequest) { + PageParams pageParams = super.validateAndGetPageParams(restRequest); + // validate max supported pageSize + if (pageParams.getSize() > MAX_SUPPORTED_LIST_INDICES_PAGE_SIZE) { + throw new IllegalArgumentException("size should be less than [" + MAX_SUPPORTED_LIST_INDICES_PAGE_SIZE + "]"); + } + // Next Token in the request will be validated by the IndexStrategyToken itself. 
+ if (Objects.nonNull(pageParams.getRequestedToken())) { + IndexPaginationStrategy.IndexStrategyToken.validateIndexStrategyToken(pageParams.getRequestedToken()); + } + return pageParams; + } + + protected int defaultPageSize() { + return DEFAULT_LIST_INDICES_PAGE_SIZE; + } + + @Override + protected IndexPaginationStrategy getPaginationStrategy(ClusterStateResponse clusterStateResponse) { + return new IndexPaginationStrategy(pageParams, clusterStateResponse.getState()); + } + + // Public for testing + @Override + public Iterator> getTableIterator(String[] indices, Map indexSettingsMap) { + return new Iterator<>() { + int indexPos = 0; + + @Override + public boolean hasNext() { + while (indexPos < indices.length && indexSettingsMap.containsKey(indices[indexPos]) == false) { + indexPos++; + } + return indexPos < indices.length; + } + + @Override + public Tuple next() { + Tuple tuple = new Tuple<>(indices[indexPos], indexSettingsMap.get(indices[indexPos])); + indexPos++; + return tuple; + } + }; + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/list/RestListAction.java b/server/src/main/java/org/opensearch/rest/action/list/RestListAction.java new file mode 100644 index 0000000000000..4b8551ea7e14a --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/list/RestListAction.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.list; + +import org.opensearch.client.node.NodeClient; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestRequest; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.GET; + +/** + * Base _list API endpoint + * + * @opensearch.api + */ +public class RestListAction extends BaseRestHandler { + + private static final String LIST = ":‑|"; + private static final String LIST_NL = LIST + "\n"; + private final String HELP; + + public RestListAction(List listActions) { + StringBuilder sb = new StringBuilder(); + sb.append(LIST_NL); + for (AbstractListAction listAction : listActions) { + listAction.documentation(sb); + } + HELP = sb.toString(); + } + + @Override + public List routes() { + return singletonList(new Route(GET, "/_list")); + } + + @Override + public String getName() { + return "list_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.OK, HELP)); + } + +} diff --git a/server/src/main/java/org/opensearch/rest/action/list/package-info.java b/server/src/main/java/org/opensearch/rest/action/list/package-info.java new file mode 100644 index 0000000000000..8d6563ff9b344 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/list/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * {@link org.opensearch.rest.RestHandler}s for actions that list out results in chunks of pages. 
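Taken together, these handlers expose GET /_list/indices as a paginated alternative to _cat/indices: each JSON page is an object with a next_token field and an indices array, and the token from one page is echoed back through the next_token query parameter to fetch the following page. A rough client-side sketch using only the JDK HTTP client (the cluster address, absence of security, and the placeholder token are assumptions for the example):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class ListIndicesPagingSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();

        // First page: up to 500 indices (the _list/indices default), sorted by creation time ascending.
        HttpRequest first = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_list/indices?format=json&size=500"))
            .GET()
            .build();
        String body = client.send(first, HttpResponse.BodyHandlers.ofString()).body();
        System.out.println(body);
        // Expected shape (abbreviated):
        // { "next_token": "<opaque-token-or-null>", "indices": [ { "index": "...", ... }, ... ] }

        // Subsequent page: pass the token back as the next_token query parameter (URL-encode a real token).
        String nextToken = "<token copied from the previous response>"; // placeholder, not a real token
        HttpRequest next = HttpRequest.newBuilder()
            .uri(URI.create("http://localhost:9200/_list/indices?format=json&size=500&next_token=" + nextToken))
            .GET()
            .build();
        System.out.println(client.send(next, HttpResponse.BodyHandlers.ofString()).body());
    }
}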
+ */ +package org.opensearch.rest.action.list; diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java index 3a6b45013e892..05465e32631fd 100644 --- a/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/opensearch/rest/action/search/RestSearchAction.java @@ -210,7 +210,7 @@ public static void parseSearchRequest( searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); - searchRequest.pipeline(request.param("search_pipeline")); + searchRequest.pipeline(request.param("search_pipeline", searchRequest.source().pipeline())); checkRestTotalHits(request, searchRequest); request.paramAsBoolean(INCLUDE_NAMED_QUERIES_SCORE_PARAM, false); diff --git a/server/src/main/java/org/opensearch/rest/pagination/IndexPaginationStrategy.java b/server/src/main/java/org/opensearch/rest/pagination/IndexPaginationStrategy.java new file mode 100644 index 0000000000000..f89ab14e4b24d --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/pagination/IndexPaginationStrategy.java @@ -0,0 +1,185 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.pagination; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static org.opensearch.rest.pagination.PageParams.PARAM_ASC_SORT_VALUE; + +/** + * This strategy can be used by the Rest APIs wanting to paginate the responses based on Indices. + * The strategy considers create timestamps of indices as the keys to iterate over pages. + * + * @opensearch.internal + */ +public class IndexPaginationStrategy implements PaginationStrategy { + private static final String DEFAULT_INDICES_PAGINATED_ENTITY = "indices"; + + private static final Comparator ASC_COMPARATOR = (metadata1, metadata2) -> { + if (metadata1.getCreationDate() == metadata2.getCreationDate()) { + return metadata1.getIndex().getName().compareTo(metadata2.getIndex().getName()); + } + return Long.compare(metadata1.getCreationDate(), metadata2.getCreationDate()); + }; + private static final Comparator DESC_COMPARATOR = (metadata1, metadata2) -> { + if (metadata1.getCreationDate() == metadata2.getCreationDate()) { + return metadata2.getIndex().getName().compareTo(metadata1.getIndex().getName()); + } + return Long.compare(metadata2.getCreationDate(), metadata1.getCreationDate()); + }; + + private final PageToken pageToken; + private final List requestedIndices; + + public IndexPaginationStrategy(PageParams pageParams, ClusterState clusterState) { + // Get list of indices metadata sorted by their creation time and filtered by the last sent index + List sortedIndices = PaginationStrategy.getSortedIndexMetadata( + clusterState, + getMetadataFilter(pageParams.getRequestedToken(), pageParams.getSort()), + PARAM_ASC_SORT_VALUE.equals(pageParams.getSort()) ? 
ASC_COMPARATOR : DESC_COMPARATOR + ); + // Trim sortedIndicesList to get the list of indices metadata to be sent as response + List metadataSublist = getMetadataSubList(sortedIndices, pageParams.getSize()); + // Get list of index names from the trimmed metadataSublist + this.requestedIndices = metadataSublist.stream().map(metadata -> metadata.getIndex().getName()).collect(Collectors.toList()); + this.pageToken = getResponseToken( + pageParams.getSize(), + sortedIndices.size(), + metadataSublist.isEmpty() ? null : metadataSublist.get(metadataSublist.size() - 1) + ); + } + + private static Predicate getMetadataFilter(String requestedTokenStr, String sortOrder) { + boolean isAscendingSort = sortOrder.equals(PARAM_ASC_SORT_VALUE); + IndexStrategyToken requestedToken = Objects.isNull(requestedTokenStr) || requestedTokenStr.isEmpty() + ? null + : new IndexStrategyToken(requestedTokenStr); + if (Objects.isNull(requestedToken)) { + return indexMetadata -> true; + } + return metadata -> { + if (metadata.getIndex().getName().equals(requestedToken.lastIndexName)) { + return false; + } else if (metadata.getCreationDate() == requestedToken.lastIndexCreationTime) { + return isAscendingSort + ? metadata.getIndex().getName().compareTo(requestedToken.lastIndexName) > 0 + : metadata.getIndex().getName().compareTo(requestedToken.lastIndexName) < 0; + } + return isAscendingSort + ? metadata.getCreationDate() > requestedToken.lastIndexCreationTime + : metadata.getCreationDate() < requestedToken.lastIndexCreationTime; + }; + } + + private List getMetadataSubList(List sortedIndices, final int pageSize) { + if (sortedIndices.isEmpty()) { + return new ArrayList<>(); + } + return sortedIndices.subList(0, Math.min(pageSize, sortedIndices.size())); + } + + private PageToken getResponseToken(final int pageSize, final int totalIndices, IndexMetadata lastIndex) { + if (totalIndices <= pageSize) { + return new PageToken(null, DEFAULT_INDICES_PAGINATED_ENTITY); + } + return new PageToken( + new IndexStrategyToken(lastIndex.getCreationDate(), lastIndex.getIndex().getName()).generateEncryptedToken(), + DEFAULT_INDICES_PAGINATED_ENTITY + ); + } + + @Override + @Nullable + public PageToken getResponseToken() { + return pageToken; + } + + @Override + public List getRequestedEntities() { + return Objects.isNull(requestedIndices) ? new ArrayList<>() : requestedIndices; + } + + /** + * TokenParser to be used by {@link IndexPaginationStrategy}. + * Token would look like: CreationTimeOfLastRespondedIndex + | + NameOfLastRespondedIndex + */ + public static class IndexStrategyToken { + + private static final String JOIN_DELIMITER = "|"; + private static final String SPLIT_REGEX = "\\|"; + private static final int CREATE_TIME_POS_IN_TOKEN = 0; + private static final int INDEX_NAME_POS_IN_TOKEN = 1; + + /** + * Represents creation times of last index which was displayed in the page. + * Used to identify the new start point in case the indices get created/deleted while queries are executed. + */ + private final long lastIndexCreationTime; + + /** + * Represents name of the last index which was displayed in the page. + * Used to identify whether the sorted list of indices has changed or not. + */ + private final String lastIndexName; + + public IndexStrategyToken(String requestedTokenString) { + // TODO: Avoid validating the requested token multiple times while calling from Rest and/or Transport layer. 
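The comparator and filter above amount to keyset pagination over (creationDate, indexName): indices are ordered by creation time with the name as a tie-breaker, and a requested token filters out everything up to and including the last index already returned, so indices created or deleted between requests do not shift the remaining pages. A JDK-only sketch of that ordering and filtering over plain values (the Idx type is a stand-in, not the OpenSearch IndexMetadata class):

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public class KeysetPaginationSketch {

    // Stand-in for IndexMetadata: just the two fields the strategy sorts and filters on.
    record Idx(String name, long creationDate) {}

    public static void main(String[] args) {
        List<Idx> all = List.of(
            new Idx("logs-b", 100L),
            new Idx("logs-a", 100L),   // same timestamp as logs-b: the name breaks the tie
            new Idx("logs-c", 200L)
        );

        // Ascending order: creation time first, then index name.
        Comparator<Idx> asc = Comparator.comparingLong(Idx::creationDate).thenComparing(Idx::name);

        // Token recorded after the previous page ended at ("logs-a", 100).
        long lastCreationTime = 100L;
        String lastName = "logs-a";

        List<Idx> nextPageCandidates = all.stream()
            .filter(idx -> idx.creationDate() > lastCreationTime
                || (idx.creationDate() == lastCreationTime && idx.name().compareTo(lastName) > 0))
            .sorted(asc)
            .collect(Collectors.toList());

        System.out.println(nextPageCandidates); // [Idx[name=logs-b, creationDate=100], Idx[name=logs-c, creationDate=200]]
    }
}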
+ validateIndexStrategyToken(requestedTokenString); + String decryptedToken = PaginationStrategy.decryptStringToken(requestedTokenString); + final String[] decryptedTokenElements = decryptedToken.split(SPLIT_REGEX); + this.lastIndexCreationTime = Long.parseLong(decryptedTokenElements[CREATE_TIME_POS_IN_TOKEN]); + this.lastIndexName = decryptedTokenElements[INDEX_NAME_POS_IN_TOKEN]; + } + + public IndexStrategyToken(long creationTimeOfLastRespondedIndex, String nameOfLastRespondedIndex) { + Objects.requireNonNull(nameOfLastRespondedIndex, "index name should be provided"); + this.lastIndexCreationTime = creationTimeOfLastRespondedIndex; + this.lastIndexName = nameOfLastRespondedIndex; + } + + public String generateEncryptedToken() { + return PaginationStrategy.encryptStringToken(String.join(JOIN_DELIMITER, String.valueOf(lastIndexCreationTime), lastIndexName)); + } + + /** + * Will perform simple validations on token received in the request. + * Token should be base64 encoded, and should contain the expected number of elements separated by "|". + * Timestamps should also be a valid long. + * + * @param requestedTokenStr string denoting the encoded token requested by the user. + */ + public static void validateIndexStrategyToken(String requestedTokenStr) { + Objects.requireNonNull(requestedTokenStr, "requestedTokenString can not be null"); + String decryptedToken = PaginationStrategy.decryptStringToken(requestedTokenStr); + final String[] decryptedTokenElements = decryptedToken.split(SPLIT_REGEX); + if (decryptedTokenElements.length != 2) { + throw new OpenSearchParseException(INCORRECT_TAINTED_NEXT_TOKEN_ERROR_MESSAGE); + } + try { + long creationTimeOfLastRespondedIndex = Long.parseLong(decryptedTokenElements[CREATE_TIME_POS_IN_TOKEN]); + if (creationTimeOfLastRespondedIndex <= 0) { + throw new OpenSearchParseException(INCORRECT_TAINTED_NEXT_TOKEN_ERROR_MESSAGE); + } + } catch (NumberFormatException exception) { + throw new OpenSearchParseException(INCORRECT_TAINTED_NEXT_TOKEN_ERROR_MESSAGE); + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/rest/pagination/PageParams.java b/server/src/main/java/org/opensearch/rest/pagination/PageParams.java new file mode 100644 index 0000000000000..9b2074bc3fed0 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/pagination/PageParams.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.pagination; + +import org.opensearch.common.annotation.PublicApi; + +/** + * + * Class specific to paginated queries, which will contain common query params required by a paginated API. 
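Despite the encrypt/decrypt naming, the token is simply the creation time and name of the last returned index joined with a pipe and Base64-encoded, which is exactly what the validation above relies on. A JDK-only sketch of the round trip (the sample index name and timestamp are made up):

import java.util.Base64;
import static java.nio.charset.StandardCharsets.UTF_8;

public class IndexStrategyTokenSketch {
    public static void main(String[] args) {
        long lastIndexCreationTime = 1_700_000_000_000L; // hypothetical creation time (epoch millis)
        String lastIndexName = "logs-000042";            // hypothetical last index on the previous page

        // Encode: "creationTime|indexName" -> Base64, as generateEncryptedToken() does.
        String token = Base64.getEncoder()
            .encodeToString((lastIndexCreationTime + "|" + lastIndexName).getBytes(UTF_8));

        // Decode and run the same sanity checks as validateIndexStrategyToken():
        // exactly two pipe-separated fields, and a parseable, positive timestamp.
        String decoded = new String(Base64.getDecoder().decode(token), UTF_8);
        String[] parts = decoded.split("\\|");
        if (parts.length != 2 || Long.parseLong(parts[0]) <= 0) {
            throw new IllegalArgumentException("tainted next_token");
        }
        System.out.println(decoded); // 1700000000000|logs-000042
    }
}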
+ */ +@PublicApi(since = "3.0.0") +public class PageParams { + + public static final String PARAM_SORT = "sort"; + public static final String PARAM_NEXT_TOKEN = "next_token"; + public static final String PARAM_SIZE = "size"; + public static final String PARAM_ASC_SORT_VALUE = "asc"; + public static final String PARAM_DESC_SORT_VALUE = "desc"; + + private final String requestedTokenStr; + private final String sort; + private final int size; + + public PageParams(String requestedToken, String sort, int size) { + this.requestedTokenStr = requestedToken; + this.sort = sort; + this.size = size; + } + + public String getSort() { + return sort; + } + + public String getRequestedToken() { + return requestedTokenStr; + } + + public int getSize() { + return size; + } + +} diff --git a/server/src/main/java/org/opensearch/rest/pagination/PageToken.java b/server/src/main/java/org/opensearch/rest/pagination/PageToken.java new file mode 100644 index 0000000000000..d62e1be695715 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/pagination/PageToken.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.pagination; + +/** + * Pagination response metadata for a paginated query. + * @opensearch.internal + */ +public class PageToken { + + public static final String PAGINATED_RESPONSE_NEXT_TOKEN_KEY = "next_token"; + + /** + * String denoting the next_token of paginated response, which will be used to fetch next page (if any). + */ + private final String nextToken; + + /** + * String denoting the element which is being paginated (for e.g. shards, indices..). + */ + private final String paginatedEntity; + + public PageToken(String nextToken, String paginatedElement) { + assert paginatedElement != null : "paginatedElement must be specified for a paginated response"; + this.nextToken = nextToken; + this.paginatedEntity = paginatedElement; + } + + public String getNextToken() { + return nextToken; + } + + public String getPaginatedEntity() { + return paginatedEntity; + } +} diff --git a/server/src/main/java/org/opensearch/rest/pagination/PaginationStrategy.java b/server/src/main/java/org/opensearch/rest/pagination/PaginationStrategy.java new file mode 100644 index 0000000000000..7f9825a7cc09b --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/pagination/PaginationStrategy.java @@ -0,0 +1,75 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.pagination; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; + +import java.util.Base64; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static java.nio.charset.StandardCharsets.UTF_8; + +/** + * Interface to be implemented by any strategy getting used for paginating rest responses. + * + * @opensearch.internal + */ +public interface PaginationStrategy { + + String INCORRECT_TAINTED_NEXT_TOKEN_ERROR_MESSAGE = + "Parameter [next_token] has been tainted and is incorrect. 
Please provide a valid [next_token]."; + + /** + * + * @return Base64 encoded string, which can be used to fetch next page of response. + */ + PageToken getResponseToken(); + + /** + * + * @return List of elements fetched corresponding to the store and token received by the strategy. + */ + List getRequestedEntities(); + + /** + * + * Utility method to get list of indices filtered as per {@param filterPredicate} and the sorted according to {@param comparator}. + */ + static List getSortedIndexMetadata( + final ClusterState clusterState, + Predicate filterPredicate, + Comparator comparator + ) { + return clusterState.metadata().indices().values().stream().filter(filterPredicate).sorted(comparator).collect(Collectors.toList()); + } + + static String encryptStringToken(String tokenString) { + if (Objects.isNull(tokenString)) { + return null; + } + return Base64.getEncoder().encodeToString(tokenString.getBytes(UTF_8)); + } + + static String decryptStringToken(String encTokenString) { + if (Objects.isNull(encTokenString)) { + return null; + } + try { + return new String(Base64.getDecoder().decode(encTokenString), UTF_8); + } catch (IllegalArgumentException exception) { + throw new OpenSearchParseException(INCORRECT_TAINTED_NEXT_TOKEN_ERROR_MESSAGE); + } + } +} diff --git a/server/src/main/java/org/opensearch/rest/pagination/package-info.java b/server/src/main/java/org/opensearch/rest/pagination/package-info.java new file mode 100644 index 0000000000000..324b8a6c46f88 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/pagination/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Exposes utilities for Rest actions to paginate responses. + */ +package org.opensearch.rest.pagination; diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index 8a9704b04566f..9c438401b9fbe 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -224,6 +224,8 @@ public static HighlightBuilder highlight() { private Map searchPipelineSource = null; + private String searchPipeline; + /** * Constructs a new search source builder. */ @@ -297,6 +299,9 @@ public SearchSourceBuilder(StreamInput in) throws IOException { derivedFields = in.readList(DerivedField::new); } } + if (in.getVersion().onOrAfter(Version.V_2_18_0)) { + searchPipeline = in.readOptionalString(); + } } @Override @@ -377,6 +382,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeList(derivedFields); } } + if (out.getVersion().onOrAfter(Version.V_2_18_0)) { + out.writeOptionalString(searchPipeline); + } } /** @@ -1111,6 +1119,13 @@ public Map searchPipelineSource() { return searchPipelineSource; } + /** + * @return a search pipeline name defined within the search source (see {@link org.opensearch.search.pipeline.SearchPipelineService}) + */ + public String pipeline() { + return searchPipeline; + } + /** * Define a search pipeline to process this search request and/or its response. See {@link org.opensearch.search.pipeline.SearchPipelineService}. 
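With this change a pipeline can be named either in the request body (the search_pipeline field carried by SearchSourceBuilder) or as the search_pipeline query parameter; when both are present the query parameter wins, because the body value only serves as the default in request.param(...). A minimal sketch of setting it programmatically, assuming the usual SearchRequest API and a hypothetical pipeline id:

import org.opensearch.action.search.SearchRequest;
import org.opensearch.index.query.QueryBuilders;
import org.opensearch.search.builder.SearchSourceBuilder;

public class SearchPipelineInSourceSketch {
    public static void main(String[] args) {
        // Pipeline named inside the search source, equivalent to {"search_pipeline": "my_pipeline", ...} in the body.
        SearchSourceBuilder source = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery())
            .pipeline("my_pipeline"); // setter added in this change; "my_pipeline" is a hypothetical pipeline id

        SearchRequest request = new SearchRequest("logs-*").source(source);

        // On the REST layer a ?search_pipeline=... query parameter, if present,
        // overrides this body value; otherwise the body value is used.
        System.out.println(request.source().pipeline()); // my_pipeline
    }
}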
*/ @@ -1119,6 +1134,14 @@ public SearchSourceBuilder searchPipelineSource(Map searchPipeli return this; } + /** + * Define a search pipeline name to process this search request and/or its response. See {@link org.opensearch.search.pipeline.SearchPipelineService}. + */ + public SearchSourceBuilder pipeline(String searchPipeline) { + this.searchPipeline = searchPipeline; + return this; + } + /** * Rewrites this search source builder into its primitive form. e.g. by * rewriting the QueryBuilder. If the builder did not change the identity @@ -1216,6 +1239,7 @@ private SearchSourceBuilder shallowCopy( rewrittenBuilder.pointInTimeBuilder = pointInTimeBuilder; rewrittenBuilder.derivedFieldsObject = derivedFieldsObject; rewrittenBuilder.derivedFields = derivedFields; + rewrittenBuilder.searchPipeline = searchPipeline; return rewrittenBuilder; } @@ -1283,6 +1307,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th sort(parser.text()); } else if (PROFILE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { profile = parser.booleanValue(); + } else if (SEARCH_PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { + searchPipeline = parser.text(); } else { throw new ParsingException( parser.getTokenLocation(), @@ -1612,6 +1638,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t } + if (searchPipeline != null) { + builder.field(SEARCH_PIPELINE.getPreferredName(), searchPipeline); + } + return builder; } @@ -1889,7 +1919,8 @@ public int hashCode() { trackTotalHitsUpTo, pointInTimeBuilder, derivedFieldsObject, - derivedFields + derivedFields, + searchPipeline ); } @@ -1934,7 +1965,8 @@ public boolean equals(Object obj) { && Objects.equals(trackTotalHitsUpTo, other.trackTotalHitsUpTo) && Objects.equals(pointInTimeBuilder, other.pointInTimeBuilder) && Objects.equals(derivedFieldsObject, other.derivedFieldsObject) - && Objects.equals(derivedFields, other.derivedFields); + && Objects.equals(derivedFields, other.derivedFields) + && Objects.equals(searchPipeline, other.searchPipeline); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index f6e550525a3e5..22b2a72b36026 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -468,26 +468,33 @@ public void createSnapshotV2(final CreateSnapshotRequest request, final ActionLi long pinnedTimestamp = System.currentTimeMillis(); final String repositoryName = request.repository(); final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); - validate(repositoryName, snapshotName); - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot Repository repository = repositoriesService.repository(repositoryName); + validate(repositoryName, snapshotName); + repository.executeConsistentStateUpdate(repositoryData -> new ClusterStateUpdateTask(Priority.URGENT) { + private SnapshotsInProgress.Entry newEntry; - if (repository.isReadOnly()) { - listener.onFailure( - new RepositoryException(repository.getMetadata().name(), "cannot create snapshot-v2 in a readonly repository") - ); - return; - } + private SnapshotId snapshotId; - final Snapshot snapshot = new Snapshot(repositoryName, snapshotId); - ClusterState currentState = clusterService.state(); - final Map userMeta = 
repository.adaptUserMetadata(request.userMetadata()); - try { - final StepListener repositoryDataListener = new StepListener<>(); - repositoriesService.getRepositoryData(repositoryName, repositoryDataListener); + private Snapshot snapshot; + + boolean enteredLoop; + + @Override + public ClusterState execute(ClusterState currentState) { + // move to in progress + snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot + Repository repository = repositoriesService.repository(repositoryName); + + if (repository.isReadOnly()) { + listener.onFailure( + new RepositoryException(repository.getMetadata().name(), "cannot create snapshot-v2 in a readonly repository") + ); + } + + snapshot = new Snapshot(repositoryName, snapshotId); + final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); - repositoryDataListener.whenComplete(repositoryData -> { createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName); List indices = new ArrayList<>(currentState.metadata().indices().keySet()); @@ -498,7 +505,7 @@ public void createSnapshotV2(final CreateSnapshotRequest request, final ActionLi request.indices() ); - logger.trace("[{}][{}] creating snapshot-v2 for indices [{}]", repositoryName, snapshotName, indices); + logger.info("[{}][{}] creating snapshot-v2 for indices [{}]", repositoryName, snapshotName, indices); final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List runningSnapshots = snapshots.entries(); @@ -509,74 +516,139 @@ public void createSnapshotV2(final CreateSnapshotRequest request, final ActionLi IndexId.DEFAULT_SHARD_PATH_TYPE ); final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); - final ShardGenerations shardGenerations = buildShardsGenerationFromRepositoryData( - currentState.metadata(), - currentState.routingTable(), - indexIds, - repositoryData - ); if (repositoryData.getGenId() == RepositoryData.UNKNOWN_REPO_GEN) { logger.debug("[{}] was aborted before starting", snapshot); throw new SnapshotException(snapshot, "Aborted on initialization"); } + + Map shards = new HashMap<>(); + + newEntry = SnapshotsInProgress.startedEntry( + new Snapshot(repositoryName, snapshotId), + request.includeGlobalState(), + request.partial(), + indexIds, + dataStreams, + threadPool.absoluteTimeInMillis(), + repositoryData.getGenId(), + shards, + userMeta, + version, + true, + true + ); + final List newEntries = new ArrayList<>(runningSnapshots); + newEntries.add(newEntry); + + // Entering finalize loop here to prevent concurrent snapshots v2 snapshots + enteredLoop = tryEnterRepoLoop(repositoryName); + if (enteredLoop == false) { + throw new ConcurrentSnapshotExecutionException( + repositoryName, + snapshotName, + "cannot start snapshot-v2 while a repository is in finalization state" + ); + } + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(new ArrayList<>(newEntries))) + .build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot-v2", repositoryName, snapshotName), e); + listener.onFailure(e); + if (enteredLoop) { + leaveRepoLoop(repositoryName); + } + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { + final ShardGenerations shardGenerations = 
buildShardsGenerationFromRepositoryData( + newState.metadata(), + newState.routingTable(), + newEntry.indices(), + repositoryData + ); + final List dataStreams = indexNameExpressionResolver.dataStreamNames( + newState, + request.indicesOptions(), + request.indices() + ); final SnapshotInfo snapshotInfo = new SnapshotInfo( - snapshot.getSnapshotId(), + snapshotId, shardGenerations.indices().stream().map(IndexId::getName).collect(Collectors.toList()), - dataStreams, + newEntry.dataStreams(), pinnedTimestamp, null, System.currentTimeMillis(), shardGenerations.totalShards(), Collections.emptyList(), request.includeGlobalState(), - userMeta, + newEntry.userMetadata(), true, pinnedTimestamp ); - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { - throw new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2, no longer cluster manager"); - } + final Version version = minCompatibleVersion(newState.nodes().getMinNodeVersion(), repositoryData, null); final StepListener pinnedTimestampListener = new StepListener<>(); - pinnedTimestampListener.whenComplete(repoData -> { listener.onResponse(snapshotInfo); }, listener::onFailure); - repository.finalizeSnapshot( - shardGenerations, - repositoryData.getGenId(), - metadataForSnapshot(currentState.metadata(), request.includeGlobalState(), false, dataStreams, indexIds), - snapshotInfo, - version, - state -> state, - Priority.IMMEDIATE, - new ActionListener() { - @Override - public void onResponse(RepositoryData repositoryData) { - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { - failSnapshotCompletionListeners( - snapshot, - new SnapshotException(snapshot, "Aborting snapshot-v2, no longer cluster manager") - ); - listener.onFailure( - new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2, no longer cluster manager") - ); - return; + pinnedTimestampListener.whenComplete(repoData -> { + repository.finalizeSnapshot( + shardGenerations, + repositoryData.getGenId(), + metadataForSnapshot(newState.metadata(), request.includeGlobalState(), false, dataStreams, newEntry.indices()), + snapshotInfo, + version, + state -> stateWithoutSnapshot(state, snapshot), + Priority.IMMEDIATE, + new ActionListener() { + @Override + public void onResponse(RepositoryData repositoryData) { + leaveRepoLoop(repositoryName); + if (clusterService.state().nodes().isLocalNodeElectedClusterManager() == false) { + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "Aborting snapshot-v2, no longer cluster manager") + ); + listener.onFailure( + new SnapshotException( + repositoryName, + snapshotName, + "Aborting snapshot-v2, no longer cluster manager" + ) + ); + return; + } + listener.onResponse(snapshotInfo); } - updateSnapshotPinnedTimestamp(repositoryData, snapshot, pinnedTimestamp, pinnedTimestampListener); - } - @Override - public void onFailure(Exception e) { - logger.error("Failed to upload files to snapshot repo {} for snapshot-v2 {} ", repositoryName, snapshotName); - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + logger.error("Failed to finalize snapshot repo {} for snapshot-v2 {} ", repositoryName, snapshotName); + leaveRepoLoop(repositoryName); + // cleaning up in progress snapshot here + stateWithoutSnapshotV2(newState); + listener.onFailure(e); + } } - } - ); + ); + }, e -> { + logger.error("Failed to update pinned timestamp for snapshot-v2 {} {} {} ", repositoryName, snapshotName, e); + leaveRepoLoop(repositoryName); + // cleaning up in 
progress snapshot here + stateWithoutSnapshotV2(newState); + listener.onFailure(e); + }); + updateSnapshotPinnedTimestamp(repositoryData, snapshot, pinnedTimestamp, pinnedTimestampListener); + } - }, listener::onFailure); - } catch (Exception e) { - assert false : new AssertionError(e); - logger.error("Snapshot-v2 {} creation failed with exception {}", snapshot.getSnapshotId().getName(), e); - listener.onFailure(e); - } + @Override + public TimeValue timeout() { + return request.clusterManagerNodeTimeout(); + } + + }, "create_snapshot [" + snapshotName + ']', listener::onFailure); } private void createSnapshotPreValidations( @@ -613,7 +685,7 @@ private void updateSnapshotPinnedTimestamp( ) { remoteStorePinnedTimestampService.pinTimestamp( timestampToPin, - snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshot.getSnapshotId().getUUID(), + getPinningEntity(snapshot.getRepository(), snapshot.getSnapshotId().getUUID()), new ActionListener() { @Override public void onResponse(Void unused) { @@ -631,6 +703,10 @@ public void onFailure(Exception e) { ); } + public static String getPinningEntity(String repositoryName, String snapshotUUID) { + return repositoryName + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshotUUID; + } + private void cloneSnapshotPinnedTimestamp( RepositoryData repositoryData, SnapshotId sourceSnapshot, @@ -640,8 +716,8 @@ private void cloneSnapshotPinnedTimestamp( ) { remoteStorePinnedTimestampService.cloneTimestamp( timestampToPin, - snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + sourceSnapshot.getUUID(), - snapshot.getRepository() + SNAPSHOT_PINNED_TIMESTAMP_DELIMITER + snapshot.getSnapshotId().getUUID(), + getPinningEntity(snapshot.getRepository(), sourceSnapshot.getUUID()), + getPinningEntity(snapshot.getRepository(), snapshot.getSnapshotId().getUUID()), new ActionListener() { @Override public void onResponse(Void unused) { @@ -766,12 +842,24 @@ public void cloneSnapshotV2( private SnapshotId sourceSnapshotId; private List indicesForSnapshot; + boolean enteredRepoLoop; + @Override public ClusterState execute(ClusterState currentState) { createSnapshotPreValidations(currentState, repositoryData, repositoryName, snapshotName); final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final List runningSnapshots = snapshots.entries(); + // Entering finalize loop here to prevent concurrent snapshots v2 snapshots + enteredRepoLoop = tryEnterRepoLoop(repositoryName); + if (enteredRepoLoop == false) { + throw new ConcurrentSnapshotExecutionException( + repositoryName, + snapshotName, + "cannot start snapshot-v2 while a repository is in finalization state" + ); + } + sourceSnapshotId = repositoryData.getSnapshotIds() .stream() .filter(src -> src.getName().equals(request.source())) @@ -795,14 +883,14 @@ public ClusterState execute(ClusterState currentState) { indicesForSnapshot.add(indexId.getName()); } } - newEntry = SnapshotsInProgress.startClone( snapshot, sourceSnapshotId, repositoryData.resolveIndices(indicesForSnapshot), threadPool.absoluteTimeInMillis(), repositoryData.getGenId(), - minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null) + minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null), + true ); final List newEntries = new ArrayList<>(runningSnapshots); newEntries.add(newEntry); @@ -813,6 +901,9 @@ public ClusterState execute(ClusterState currentState) { public void onFailure(String source, Exception e) { logger.warn(() 
-> new ParameterizedMessage("[{}][{}] failed to clone snapshot-v2", repositoryName, snapshotName), e); listener.onFailure(e); + if (enteredRepoLoop) { + leaveRepoLoop(repositoryName); + } } @Override @@ -839,67 +930,80 @@ public void clusterStateProcessed(String source, ClusterState oldState, final Cl true, snapshotInfo.getPinnedTimestamp() ); - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + if (clusterService.state().nodes().isLocalNodeElectedClusterManager() == false) { throw new SnapshotException(repositoryName, snapshotName, "Aborting snapshot-v2 clone, no longer cluster manager"); } final StepListener pinnedTimestampListener = new StepListener<>(); pinnedTimestampListener.whenComplete(repoData -> { - logger.info("snapshot-v2 clone [{}] completed successfully", snapshot); - listener.onResponse(null); - }, listener::onFailure); - repository.finalizeSnapshot( - shardGenerations, - repositoryData.getGenId(), - metadataForSnapshot( - currentState.metadata(), - newEntry.includeGlobalState(), - false, - newEntry.dataStreams(), - newEntry.indices() - ), - cloneSnapshotInfo, - repositoryData.getVersion(sourceSnapshotId), - state -> stateWithoutSnapshot(state, snapshot), - Priority.IMMEDIATE, - new ActionListener() { - @Override - public void onResponse(RepositoryData repositoryData) { - if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { - failSnapshotCompletionListeners( - snapshot, - new SnapshotException(snapshot, "Aborting Snapshot-v2 clone, no longer cluster manager") - ); - listener.onFailure( - new SnapshotException( - repositoryName, - snapshotName, - "Aborting Snapshot-v2 clone, no longer cluster manager" - ) - ); - return; + repository.finalizeSnapshot( + shardGenerations, + repositoryData.getGenId(), + metadataForSnapshot( + currentState.metadata(), + newEntry.includeGlobalState(), + false, + newEntry.dataStreams(), + newEntry.indices() + ), + cloneSnapshotInfo, + repositoryData.getVersion(sourceSnapshotId), + state -> stateWithoutSnapshot(state, snapshot), + Priority.IMMEDIATE, + new ActionListener() { + @Override + public void onResponse(RepositoryData repositoryData) { + leaveRepoLoop(repositoryName); + if (!clusterService.state().nodes().isLocalNodeElectedClusterManager()) { + failSnapshotCompletionListeners( + snapshot, + new SnapshotException(snapshot, "Aborting Snapshot-v2 clone, no longer cluster manager") + ); + listener.onFailure( + new SnapshotException( + repositoryName, + snapshotName, + "Aborting Snapshot-v2 clone, no longer cluster manager" + ) + ); + return; + } + logger.info("snapshot-v2 clone [{}] completed successfully", snapshot); + listener.onResponse(null); } - cloneSnapshotPinnedTimestamp( - repositoryData, - sourceSnapshotId, - snapshot, - snapshotInfo.getPinnedTimestamp(), - pinnedTimestampListener - ); - } - @Override - public void onFailure(Exception e) { - logger.error( - "Failed to upload files to snapshot repo {} for clone snapshot-v2 {} ", - repositoryName, - snapshotName - ); - listener.onFailure(e); + @Override + public void onFailure(Exception e) { + logger.error( + "Failed to upload files to snapshot repo {} for clone snapshot-v2 {} ", + repositoryName, + snapshotName + ); + stateWithoutSnapshotV2(newState); + leaveRepoLoop(repositoryName); + listener.onFailure(e); + } } - } + ); + }, e -> { + logger.error("Failed to update pinned timestamp for snapshot-v2 {} {} ", repositoryName, snapshotName); + stateWithoutSnapshotV2(newState); + leaveRepoLoop(repositoryName); + listener.onFailure(e); + }); + + 
cloneSnapshotPinnedTimestamp( + repositoryData, + sourceSnapshotId, + snapshot, + snapshotInfo.getPinnedTimestamp(), + pinnedTimestampListener ); - - }, listener::onFailure); + }, e -> { + logger.error("Failed to retrieve snapshot info for snapshot-v2 {} {} ", repositoryName, snapshotName); + stateWithoutSnapshotV2(newState); + leaveRepoLoop(repositoryName); + listener.onFailure(e); + }); } @Override @@ -1482,6 +1586,13 @@ public void applyClusterState(ClusterChangedEvent event) { // cluster-manager SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); final boolean newClusterManager = event.previousState().nodes().isLocalNodeElectedClusterManager() == false; + + if (newClusterManager && snapshotsInProgress.entries().isEmpty() == false) { + // clean up snapshot v2 in progress or clone v2 present. + // Snapshot v2 create and clone are sync operations. In case of cluster manager failure in the midst, we won't + // send ack to the caller and won't continue on the new cluster manager. The caller will need to retry it. + stateWithoutSnapshotV2(event.state()); + } processExternalChanges( newClusterManager || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) @@ -1593,7 +1704,14 @@ private void processExternalChanges(boolean changedNodes, boolean startShards) { @Override public ClusterState execute(ClusterState currentState) { RoutingTable routingTable = currentState.routingTable(); - final SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + // Removing shallow snapshots v2 as we take care of these in stateWithoutSnapshotV2() + snapshots = SnapshotsInProgress.of( + snapshots.entries() + .stream() + .filter(snapshot -> snapshot.remoteStoreIndexShallowCopyV2() == false) + .collect(Collectors.toList()) + ); DiscoveryNodes nodes = currentState.nodes(); boolean changed = false; final EnumSet statesToUpdate; @@ -1650,7 +1768,7 @@ public ClusterState execute(ClusterState currentState) { changed = true; logger.debug("[{}] was found in dangling INIT or ABORTED state", snapshot); } else { - if (snapshot.state().completed() || completed(snapshot.shards().values())) { + if ((snapshot.state().completed() || completed(snapshot.shards().values()))) { finishedSnapshots.add(snapshot); } updatedSnapshotEntries.add(snapshot); @@ -2176,6 +2294,59 @@ private static ClusterState stateWithoutSnapshot(ClusterState state, Snapshot sn return readyDeletions(result).v1(); } + private void stateWithoutSnapshotV2(ClusterState state) { + SnapshotsInProgress snapshots = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + boolean changed = false; + ArrayList entries = new ArrayList<>(); + for (SnapshotsInProgress.Entry entry : snapshots.entries()) { + if (entry.remoteStoreIndexShallowCopyV2()) { + changed = true; + } else { + entries.add(entry); + } + } + if (changed) { + logger.info("Cleaning up in progress v2 snapshots now"); + clusterService.submitStateUpdateTask( + "remove in progress snapshot v2 after cluster manager switch", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + SnapshotsInProgress snapshots = state.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); + boolean changed = false; + ArrayList entries 

= new ArrayList<>(); + for (SnapshotsInProgress.Entry entry : snapshots.entries()) { + if (entry.remoteStoreIndexShallowCopyV2()) { + changed = true; + } else { + entries.add(entry); + } + } + if (changed) { + return ClusterState.builder(currentState) + .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries))) + .build(); + } else { + return currentState; + } + } + + @Override + public void onFailure(String source, Exception e) { + // execute never fails , so we should never hit this. + logger.warn( + () -> new ParameterizedMessage( + "failed to remove in progress snapshot v2 state after cluster manager switch {}", + e + ), + e + ); + } + } + ); + } + } + /** * Removes record of running snapshot from cluster state and notifies the listener when this action is complete. This method is only * used when the snapshot fails for some reason. During normal operation the snapshot repository will remove the @@ -3326,6 +3497,9 @@ public boolean assertAllListenersResolved() { + " on [" + localNode + "]"; + if (repositoryOperations.isEmpty() == false) { + logger.info("Not empty"); + } assert repositoryOperations.isEmpty() : "Found leaked snapshots to finalize " + repositoryOperations + " on [" + localNode + "]"; return true; } diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 81220ab171b34..d795fd252b7fc 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -53,6 +53,7 @@ import org.opensearch.core.service.ReportingService; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.gateway.remote.ClusterStateChecksum; import org.opensearch.node.Node; import java.io.IOException; @@ -118,6 +119,7 @@ public static class Names { public static final String REMOTE_RECOVERY = "remote_recovery"; public static final String REMOTE_STATE_READ = "remote_state_read"; public static final String INDEX_SEARCHER = "index_searcher"; + public static final String REMOTE_STATE_CHECKSUM = "remote_state_checksum"; } /** @@ -191,6 +193,7 @@ public static ThreadPoolType fromType(String type) { map.put(Names.REMOTE_RECOVERY, ThreadPoolType.SCALING); map.put(Names.REMOTE_STATE_READ, ThreadPoolType.SCALING); map.put(Names.INDEX_SEARCHER, ThreadPoolType.RESIZABLE); + map.put(Names.REMOTE_STATE_CHECKSUM, ThreadPoolType.FIXED); THREAD_POOL_TYPES = Collections.unmodifiableMap(map); } @@ -307,6 +310,10 @@ public ThreadPool( runnableTaskListener ) ); + builders.put( + Names.REMOTE_STATE_CHECKSUM, + new FixedExecutorBuilder(settings, Names.REMOTE_STATE_CHECKSUM, ClusterStateChecksum.COMPONENT_SIZE, 1000) + ); for (final ExecutorBuilder builder : customBuilders) { if (builders.containsKey(builder.name())) { diff --git a/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java b/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java index e634323d58269..3a3e8c964b6c5 100644 --- a/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ClusterConnectionManager.java @@ -64,6 +64,15 @@ public class ClusterConnectionManager implements ConnectionManager { private final ConcurrentMap connectedNodes = ConcurrentCollections.newConcurrentMap(); private final ConcurrentMap> pendingConnections = ConcurrentCollections.newConcurrentMap(); + /** + This 
set is used only by cluster-manager nodes. + Nodes are marked as pending disconnect right before cluster state publish phase. + They are cleared up as part of cluster state apply commit phase + This is to avoid connections from being made to nodes that are in the process of leaving the cluster + Note: If a disconnect is initiated while a connect is in progress, this Set will not handle this case. + Callers need to ensure that connects and disconnects are sequenced. + */ + private final Set pendingDisconnections = ConcurrentCollections.newConcurrentSet(); private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") { @Override protected void closeInternal() { @@ -122,12 +131,19 @@ public void connectToNode( ConnectionValidator connectionValidator, ActionListener listener ) throws ConnectTransportException { + logger.trace("connecting to node [{}]", node); ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile); if (node == null) { listener.onFailure(new ConnectTransportException(null, "can't connect to a null node")); return; } + // if node-left is still in progress, we fail the connect request early + if (pendingDisconnections.contains(node)) { + listener.onFailure(new IllegalStateException("cannot make a new connection as disconnect to node [" + node + "] is pending")); + return; + } + if (connectingRefCounter.tryIncRef() == false) { listener.onFailure(new IllegalStateException("connection manager is closed")); return; @@ -170,6 +186,7 @@ public void connectToNode( conn.addCloseListener(ActionListener.wrap(() -> { logger.trace("unregistering {} after connection close and marking as disconnected", node); connectedNodes.remove(node, finalConnection); + pendingDisconnections.remove(node); connectionListener.onNodeDisconnected(node, conn); })); } @@ -226,6 +243,19 @@ public void disconnectFromNode(DiscoveryNode node) { // if we found it and removed it we close nodeChannels.close(); } + pendingDisconnections.remove(node); + logger.trace("Removed node [{}] from pending disconnections list", node); + } + + @Override + public void setPendingDisconnection(DiscoveryNode node) { + logger.trace("marking disconnection as pending for node: [{}]", node); + pendingDisconnections.add(node); + } + + @Override + public void clearPendingDisconnections() { + pendingDisconnections.clear(); } /** diff --git a/server/src/main/java/org/opensearch/transport/ConnectionManager.java b/server/src/main/java/org/opensearch/transport/ConnectionManager.java index 10cfc2907098f..ebd5ccf29c8cc 100644 --- a/server/src/main/java/org/opensearch/transport/ConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/ConnectionManager.java @@ -65,6 +65,10 @@ void connectToNode( void disconnectFromNode(DiscoveryNode node); + void setPendingDisconnection(DiscoveryNode node); + + void clearPendingDisconnections(); + Set getAllConnectedNodes(); int size(); diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java index bd646f10df517..52f29bea8050d 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionManager.java @@ -114,6 +114,16 @@ public void disconnectFromNode(DiscoveryNode node) { delegate.disconnectFromNode(node); } + @Override + public void setPendingDisconnection(DiscoveryNode node) { + 
delegate.setPendingDisconnection(node); + } + + @Override + public void clearPendingDisconnections() { + delegate.clearPendingDisconnections(); + } + @Override public ConnectionProfile getConnectionProfile() { return delegate.getConnectionProfile(); diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index fff6d82b23c7e..fe8631aa5ca3d 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -773,6 +773,18 @@ public void disconnectFromNode(DiscoveryNode node) { connectionManager.disconnectFromNode(node); } + public void setPendingDisconnection(DiscoveryNode node) { + connectionManager.setPendingDisconnection(node); + } + + /** + * Wipes out all pending disconnections. + * This is called on cluster-manager failover to remove stale entries + */ + public void clearPendingDisconnections() { + connectionManager.clearPendingDisconnections(); + } + public void addMessageListener(TransportMessageListener listener) { messageListener.listeners.add(listener); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java index 11902728eed07..34065daff2b8a 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java @@ -95,6 +95,7 @@ import org.opensearch.node.NodeResourceUsageStats; import org.opensearch.node.NodesResourceUsageStats; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.node.remotestore.RemoteStoreNodeStats; import org.opensearch.ratelimitting.admissioncontrol.controllers.AdmissionController; import org.opensearch.ratelimitting.admissioncontrol.controllers.CpuBasedAdmissionController; import org.opensearch.ratelimitting.admissioncontrol.enums.AdmissionControlActionType; @@ -614,6 +615,14 @@ public void testSerialization() throws IOException { } else { assertEquals(nodeCacheStats, deserializedNodeCacheStats); } + + RemoteStoreNodeStats remoteStoreNodeStats = nodeStats.getRemoteStoreNodeStats(); + RemoteStoreNodeStats deserializedRemoteStoreNodeStats = deserializedNodeStats.getRemoteStoreNodeStats(); + if (remoteStoreNodeStats == null) { + assertNull(deserializedRemoteStoreNodeStats); + } else { + assertEquals(remoteStoreNodeStats, deserializedRemoteStoreNodeStats); + } } } } @@ -996,6 +1005,16 @@ public void apply(String action, AdmissionControlActionType admissionControlActi nodeCacheStats = new NodeCacheStats(cacheStatsMap, flags); } + RemoteStoreNodeStats remoteStoreNodeStats = null; + if (frequently()) { + remoteStoreNodeStats = new RemoteStoreNodeStats() { + @Override + public long getLastSuccessfulFetchOfPinnedTimestamps() { + return 123456L; + } + }; + } + // TODO: Only remote_store based aspects of NodeIndicesStats are being tested here. 
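Stepping back from the test changes for a moment: the pending-disconnections bookkeeping added above means a node that is about to be published as left cannot be reconnected to until the disconnect actually completes, and the set is wiped on cluster-manager failover via clearPendingDisconnections. A simplified, JDK-only model of that guard (this is a sketch of the behaviour, not the ClusterConnectionManager implementation):

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class PendingDisconnectionsSketch {

    // Simplified stand-in for ClusterConnectionManager: only the pending-disconnect bookkeeping.
    static final Set<String> pendingDisconnections = ConcurrentHashMap.newKeySet();

    static void connectToNode(String node) {
        // Fail fast while a node-left for this node is still being published/applied.
        if (pendingDisconnections.contains(node)) {
            throw new IllegalStateException("cannot connect, disconnect to node [" + node + "] is pending");
        }
        System.out.println("connected to " + node);
    }

    public static void main(String[] args) {
        String node = "node-1";

        pendingDisconnections.add(node);    // marked right before the node-left cluster state is published
        try {
            connectToNode(node);            // rejected while the disconnect is pending
        } catch (IllegalStateException expected) {
            System.out.println(expected.getMessage());
        }

        pendingDisconnections.remove(node); // cleared once the disconnect completes (or wholesale on failover)
        connectToNode(node);                // allowed again
    }
}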
// It is possible to test other metrics in NodeIndicesStats as well since it extends Writeable now return new NodeStats( @@ -1027,7 +1046,8 @@ public void apply(String action, AdmissionControlActionType admissionControlActi segmentReplicationRejectionStats, null, admissionControlStats, - nodeCacheStats + nodeCacheStats, + remoteStoreNodeStats ); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java index 1c4a77905d73f..823661ba14abf 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java @@ -349,6 +349,7 @@ private ClusterStatsNodeResponse createClusterStatsNodeResponse( null, null, null, + null, null ); if (defaultBehavior) { diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 40514c526f190..acda1445bacbb 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -42,6 +42,8 @@ import org.opensearch.geometry.LinearRing; import org.opensearch.index.query.GeoShapeQueryBuilder; import org.opensearch.index.query.QueryBuilders; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.search.AbstractSearchTestCase; import org.opensearch.search.Scroll; import org.opensearch.search.builder.PointInTimeBuilder; @@ -50,14 +52,18 @@ import org.opensearch.search.rescore.QueryRescorerBuilder; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import org.opensearch.test.rest.FakeRestRequest; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.function.IntConsumer; import static java.util.Collections.emptyMap; +import static org.opensearch.action.search.SearchType.DFS_QUERY_THEN_FETCH; import static org.opensearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.hamcrest.Matchers.equalTo; +import static org.mockito.Mockito.mock; public class SearchRequestTests extends AbstractSearchTestCase { @@ -242,6 +248,19 @@ public void testCopyConstructor() throws IOException { assertNotSame(deserializedRequest, searchRequest); } + public void testParseSearchRequestWithUnsupportedSearchType() throws IOException { + RestRequest restRequest = new FakeRestRequest(); + SearchRequest searchRequest = createSearchRequest(); + IntConsumer setSize = mock(IntConsumer.class); + restRequest.params().put("search_type", "query_and_fetch"); + + IllegalArgumentException exception = expectThrows( + IllegalArgumentException.class, + () -> RestSearchAction.parseSearchRequest(searchRequest, restRequest, null, namedWriteableRegistry, setSize) + ); + assertEquals("Unsupported search type [query_and_fetch]", exception.getMessage()); + } + public void testEqualsAndHashcode() throws IOException { checkEqualsAndHashCode(createSearchRequest(), SearchRequest::new, this::mutate); } @@ -268,10 +287,7 @@ private SearchRequest mutate(SearchRequest searchRequest) { ); mutators.add( () -> mutation.searchType( - randomValueOtherThan( - searchRequest.searchType(), - () -> randomFrom(SearchType.DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH) - ) + 
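The new testParseSearchRequestWithUnsupportedSearchType above pins the error message produced for a removed search type. An illustrative, standalone version of that validation (the real check lives in RestSearchAction#parseSearchRequest; the supported set below is inferred from the SearchType values the test mutates between):

```java
import java.util.Locale;
import java.util.Set;

// Illustrative validation only; not the actual RestSearchAction code.
final class SearchTypeValidationSketch {
    private static final Set<String> SUPPORTED = Set.of("query_then_fetch", "dfs_query_then_fetch");

    static String validateSearchType(String searchType) {
        String normalized = searchType.toLowerCase(Locale.ROOT);
        if (SUPPORTED.contains(normalized) == false) {
            // Matches the message asserted by testParseSearchRequestWithUnsupportedSearchType.
            throw new IllegalArgumentException("Unsupported search type [" + searchType + "]");
        }
        return normalized;
    }

    public static void main(String[] args) {
        System.out.println(validateSearchType("dfs_query_then_fetch")); // accepted
        validateSearchType("query_and_fetch");                          // throws IllegalArgumentException
    }
}
```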
randomValueOtherThan(searchRequest.searchType(), () -> randomFrom(DFS_QUERY_THEN_FETCH, SearchType.QUERY_THEN_FETCH)) ) ); mutators.add(() -> mutation.source(randomValueOtherThan(searchRequest.source(), this::createSearchSourceBuilder))); diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java index 5539dd26dd52d..cd050fb346563 100644 --- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java @@ -195,6 +195,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -226,6 +227,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ), new NodeStats( @@ -257,6 +259,7 @@ public void testFillDiskUsage() { null, null, null, + null, null ) ); @@ -319,6 +322,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -350,6 +354,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ), new NodeStats( @@ -381,6 +386,7 @@ public void testFillDiskUsageSomeInvalidValues() { null, null, null, + null, null ) ); diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java index 4cf82f1dabab3..4500860c937ea 100644 --- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java @@ -35,6 +35,9 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LoggerContext; +import org.apache.logging.log4j.core.config.Configuration; +import org.apache.logging.log4j.core.config.LoggerConfig; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; @@ -53,9 +56,11 @@ import org.opensearch.telemetry.tracing.noop.NoopTracer; import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.TestLogsAppender; import org.opensearch.test.junit.annotations.TestLogging; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ClusterConnectionManager; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.Transport; @@ -69,6 +74,7 @@ import org.junit.Before; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -77,6 +83,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Predicate; import static java.util.Collections.emptySet; @@ -86,12 +93,15 @@ import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.opensearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; public class NodeConnectionsServiceTests extends OpenSearchTestCase { private ThreadPool threadPool; private TransportService transportService; private Map> nodeConnectionBlocks; + private TestLogsAppender 
testLogsAppender; + private LoggerContext loggerContext; private List generateNodes() { List nodes = new ArrayList<>(); @@ -490,6 +500,108 @@ public void testDebugLogging() throws IllegalAccessException { } } + public void testConnectionCheckerRetriesIfPendingDisconnection() throws InterruptedException { + final Settings.Builder settings = Settings.builder(); + final long reconnectIntervalMillis = 50; + settings.put(CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), reconnectIntervalMillis + "ms"); + + final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue( + builder().put(NODE_NAME_SETTING.getKey(), "node").build(), + random() + ); + + MockTransport transport = new MockTransport(deterministicTaskQueue.getThreadPool()); + TestTransportService transportService = new TestTransportService(transport, deterministicTaskQueue.getThreadPool()); + transportService.start(); + transportService.acceptIncomingRequests(); + + final TestNodeConnectionsService service = new TestNodeConnectionsService( + settings.build(), + deterministicTaskQueue.getThreadPool(), + transportService + ); + service.start(); + + // setup the connections + final DiscoveryNode node = new DiscoveryNode("node0", buildNewFakeTransportAddress(), Version.CURRENT); + + final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node).build(); + + final AtomicBoolean connectionCompleted = new AtomicBoolean(); + service.connectToNodes(nodes, () -> connectionCompleted.set(true)); + deterministicTaskQueue.runAllRunnableTasks(); + assertTrue(connectionCompleted.get()); + + // reset any logs as we want to assert for exceptions that show up after this + // reset connect to node count to assert for later + logger.info("--> resetting captured logs and counters"); + testLogsAppender.clearCapturedLogs(); + // this ensures we only track connection attempts that happen after the disconnection + transportService.resetConnectToNodeCallCount(); + + // block connection checker reconnection attempts until after we set pending disconnections + logger.info("--> disabling connection checker, and triggering disconnect"); + service.setShouldReconnect(false); + transportService.disconnectFromNode(node); + + // set pending disconnections to true to fail future reconnection attempts + final long maxDisconnectionTime = 1000; + deterministicTaskQueue.scheduleNow(new Runnable() { + @Override + public void run() { + logger.info("--> setting pending disconnections to fail next connection attempts"); + service.setPendingDisconnections(new HashSet<>(Collections.singleton(node))); + } + + @Override + public String toString() { + return "scheduled disconnection of " + node; + } + }); + // our task queue will have the first task as the runnable to set pending disconnections + // here we re-enable the connection checker to enqueue next tasks for attempting reconnection + logger.info("--> re-enabling reconnection checker"); + service.setShouldReconnect(true); + + final long maxReconnectionTime = 2000; + final int expectedReconnectionAttempts = 10; + + // this will first run the task to set the pending disconnections, then will execute the reconnection tasks + // exit early when we have enough reconnection attempts + logger.info("--> running tasks in order until expected reconnection attempts"); + runTasksInOrderUntilExpectedReconnectionAttempts( + deterministicTaskQueue, + maxDisconnectionTime + maxReconnectionTime, + transportService, + expectedReconnectionAttempts + ); + logger.info("--> verifying that connectionchecker tried to reconnect"); + + 
// assert that the connections failed + assertFalse("connected to " + node, transportService.nodeConnected(node)); + + // assert that we saw at least the required number of reconnection attempts, and the exceptions that showed up are as expected + logger.info("--> number of reconnection attempts: {}", transportService.getConnectToNodeCallCount()); + assertThat( + "Did not see enough reconnection attempts from connection checker", + transportService.getConnectToNodeCallCount(), + greaterThan(expectedReconnectionAttempts) + ); + boolean logFound = testLogsAppender.waitForLog("failed to connect", 1, TimeUnit.SECONDS) + && testLogsAppender.waitForLog( + "IllegalStateException: cannot make a new connection as disconnect to node", + 1, + TimeUnit.SECONDS + ); + assertTrue("Expected log for reconnection failure was not found in the required time period", logFound); + + // clear the pending disconnections and ensure the connection gets re-established automatically by connectionchecker + logger.info("--> clearing pending disconnections to allow connections to re-establish"); + service.clearPendingDisconnections(); + runTasksUntil(deterministicTaskQueue, maxDisconnectionTime + maxReconnectionTime + 2 * reconnectIntervalMillis); + assertConnectedExactlyToNodes(transportService, nodes); + } + private void runTasksUntil(DeterministicTaskQueue deterministicTaskQueue, long endTimeMillis) { while (deterministicTaskQueue.getCurrentTimeMillis() < endTimeMillis) { if (deterministicTaskQueue.hasRunnableTasks() && randomBoolean()) { @@ -501,6 +613,24 @@ private void runTasksUntil(DeterministicTaskQueue deterministicTaskQueue, long e deterministicTaskQueue.runAllRunnableTasks(); } + private void runTasksInOrderUntilExpectedReconnectionAttempts( + DeterministicTaskQueue deterministicTaskQueue, + long endTimeMillis, + TestTransportService transportService, + int expectedReconnectionAttempts + ) { + // break the loop if we timeout or if we have enough reconnection attempts + while ((deterministicTaskQueue.getCurrentTimeMillis() < endTimeMillis) + && (transportService.getConnectToNodeCallCount() <= expectedReconnectionAttempts)) { + if (deterministicTaskQueue.hasRunnableTasks() && randomBoolean()) { + deterministicTaskQueue.runNextTask(); + } else if (deterministicTaskQueue.hasDeferredTasks()) { + deterministicTaskQueue.advanceTime(); + } + } + deterministicTaskQueue.runAllRunnableTasksInEnqueuedOrder(); + } + private void ensureConnections(NodeConnectionsService service) { final PlainActionFuture future = new PlainActionFuture<>(); service.ensureConnections(() -> future.onResponse(null)); @@ -526,6 +656,16 @@ private void assertConnected(TransportService transportService, Iterable messagesToCapture = Arrays.asList("failed to connect", "IllegalStateException"); + testLogsAppender = new TestLogsAppender(messagesToCapture); + loggerContext = (LoggerContext) LogManager.getContext(false); + Configuration config = loggerContext.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(NodeConnectionsService.class.getName()); + loggerConfig.addAppender(testLogsAppender, null, null); + loggerConfig = config.getLoggerConfig(ClusterConnectionManager.class.getName()); + loggerConfig.addAppender(testLogsAppender, null, null); + loggerContext.updateLoggers(); ThreadPool threadPool = new TestThreadPool(getClass().getName()); this.threadPool = threadPool; nodeConnectionBlocks = newConcurrentMap(); @@ -537,6 +677,14 @@ public void setUp() throws Exception { @Override @After public void tearDown() throws Exception { 
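The log assertions in this test rely on OpenSearch's TestLogsAppender utility. As a rough sketch of the underlying pattern, assuming only the stock Log4j2 core API, a capturing appender that a test could attach to the logger under test (and remove again in tearDown, as done below) might look like this:

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

import org.apache.logging.log4j.core.LogEvent;
import org.apache.logging.log4j.core.appender.AbstractAppender;
import org.apache.logging.log4j.core.config.Property;

// Minimal stand-in for TestLogsAppender: capture formatted messages so a test can assert
// that a specific warning or exception was logged.
class CapturingAppender extends AbstractAppender {
    private final List<String> messages = new CopyOnWriteArrayList<>();

    CapturingAppender(String name) {
        super(name, null, null, true, Property.EMPTY_ARRAY);
        start(); // an appender must be started before it receives events
    }

    @Override
    public void append(LogEvent event) {
        messages.add(event.getMessage().getFormattedMessage());
    }

    boolean sawMessageContaining(String fragment) {
        return messages.stream().anyMatch(m -> m.contains(fragment));
    }

    void clear() {
        messages.clear();
    }
}
```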
+ testLogsAppender.clearCapturedLogs(); + loggerContext = (LoggerContext) LogManager.getContext(false); + Configuration config = loggerContext.getConfiguration(); + LoggerConfig loggerConfig = config.getLoggerConfig(NodeConnectionsService.class.getName()); + loggerConfig.removeAppender(testLogsAppender.getName()); + loggerConfig = config.getLoggerConfig(ClusterConnectionManager.class.getName()); + loggerConfig.removeAppender(testLogsAppender.getName()); + loggerContext.updateLoggers(); transportService.stop(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); threadPool = null; @@ -545,6 +693,8 @@ public void tearDown() throws Exception { private final class TestTransportService extends TransportService { + private final AtomicInteger connectToNodeCallCount = new AtomicInteger(0); + private TestTransportService(Transport transport, ThreadPool threadPool) { super( Settings.EMPTY, @@ -588,6 +738,47 @@ public void connectToNode(DiscoveryNode node, ActionListener listener) thr } else { super.connectToNode(node, listener); } + logger.info("calling connectToNode"); + connectToNodeCallCount.incrementAndGet(); + } + + public int getConnectToNodeCallCount() { + return connectToNodeCallCount.get(); + } + + public void resetConnectToNodeCallCount() { + connectToNodeCallCount.set(0); + } + } + + private class TestNodeConnectionsService extends NodeConnectionsService { + private boolean shouldReconnect = true; + + public TestNodeConnectionsService(Settings settings, ThreadPool threadPool, TransportService transportService) { + super(settings, threadPool, transportService); + } + + public void setShouldReconnect(boolean shouldReconnect) { + this.shouldReconnect = shouldReconnect; + } + + @Override + protected void doStart() { + final StoppableConnectionChecker connectionChecker = new StoppableConnectionChecker(); + this.connectionChecker = connectionChecker; + connectionChecker.scheduleNextCheck(); + } + + class StoppableConnectionChecker extends NodeConnectionsService.ConnectionChecker { + @Override + protected void doRun() { + if (connectionChecker == this && shouldReconnect) { + connectDisconnectedTargets(this::scheduleNextCheck); + } else { + // Skip reconnection attempt but still schedule the next check + scheduleNextCheck(); + } + } } } diff --git a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java index db97c3ece94ba..c25150873a1ce 100644 --- a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java +++ b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java @@ -35,10 +35,13 @@ import org.opensearch.Version; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.nodes.BaseNodeResponse; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.telemetry.metrics.Counter; +import org.opensearch.telemetry.metrics.MetricsRegistry; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -54,6 +57,12 @@ import static java.util.Collections.emptyMap; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.anyDouble; 
+import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class AsyncShardFetchTests extends OpenSearchTestCase { private final DiscoveryNode node1 = new DiscoveryNode( @@ -78,13 +87,29 @@ public class AsyncShardFetchTests extends OpenSearchTestCase { private ThreadPool threadPool; private TestFetch test; + private Counter asyncFetchSuccessCounter; + private Counter asyncFetchFailureCounter; + private Counter dummyCounter; @Override @Before public void setUp() throws Exception { super.setUp(); this.threadPool = new TestThreadPool(getTestName()); - this.test = new TestFetch(threadPool); + final MetricsRegistry metricsRegistry = mock(MetricsRegistry.class); + this.asyncFetchFailureCounter = mock(Counter.class); + this.asyncFetchSuccessCounter = mock(Counter.class); + this.dummyCounter = mock(Counter.class); + when(metricsRegistry.createCounter(anyString(), anyString(), anyString())).thenAnswer(invocationOnMock -> { + String counterName = (String) invocationOnMock.getArguments()[0]; + if (counterName.contains("async.fetch.success.count")) { + return asyncFetchSuccessCounter; + } else if (counterName.contains("async.fetch.failure.count")) { + return asyncFetchFailureCounter; + } + return dummyCounter; + }); + this.test = new TestFetch(threadPool, metricsRegistry); } @After @@ -100,14 +125,26 @@ public void testClose() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter remains 0 because fetch is ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire a response, wait on reroute incrementing test.fireSimulationAndWait(node1.getId()); + // counter goes up because fetch completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); + // verify we get back the data node assertThat(test.reroute.get(), equalTo(1)); test.close(); try { test.fetchData(nodes, emptyMap()); + // counter should not go up when calling fetchData since fetch never completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); fail("fetch data should fail when closed"); } catch (IllegalStateException e) { // all is well @@ -125,12 +162,21 @@ public void testFullCircleSingleNodeSuccess() throws Exception { // fire a response, wait on reroute incrementing test.fireSimulationAndWait(node1.getId()); + // total counter goes up by 1 after success + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); + // verify we get back the data node assertThat(test.reroute.get(), equalTo(1)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // counter remains same because fetchData does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + 
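A note on the verification idiom repeated throughout these counter assertions: the `add(1.0)` verification pins how many increments of exactly 1.0 occurred, while the paired `add(anyDouble())` verification caps the total number of add() calls, so an unexpected increment with any other value also fails. A self-contained illustration, with CounterLike standing in for the telemetry Counter:

```java
import static org.mockito.ArgumentMatchers.anyDouble;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

interface CounterLike {
    void add(double value);
}

class CounterVerificationIdiom {
    static void demo() {
        CounterLike counter = mock(CounterLike.class);
        counter.add(1.0);

        verify(counter, times(1)).add(1.0);         // exactly one increment of 1.0
        verify(counter, times(1)).add(anyDouble()); // and no other add() calls at all
    }
}
```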
verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); } public void testFullCircleSingleNodeFailure() throws Exception { @@ -145,24 +191,47 @@ public void testFullCircleSingleNodeFailure() throws Exception { // fire a response, wait on reroute incrementing test.fireSimulationAndWait(node1.getId()); + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); + // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(1)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); + // counter remains same because fetchData does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); // on failure, we reset the failure on a successive call to fetchData, and try again afterwards test.addSimulation(node1.getId(), response1); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); + // No additional failure, empty data so no change in counter + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); test.fireSimulationAndWait(node1.getId()); + // Success counter will increase + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); + // 2 reroutes, cause we have a failure that we clear assertThat(test.reroute.get(), equalTo(3)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // counter remains same because fetchData does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); } public void testIgnoreResponseFromDifferentRound() throws Exception { @@ -173,20 +242,40 @@ public void testIgnoreResponseFromDifferentRound() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because fetchData is not completed + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // handle a response with incorrect round id, wait on reroute incrementing test.processAsyncFetch(Collections.singletonList(response1), Collections.emptyList(), 0); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(1)); + // success counter increments to 1 because we called processAsyncFetch with a valid response, even though the round was incorrect + // failure counter also increments by 1 with empty list + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(0.0); + 
verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); // fire a response (with correct round id), wait on reroute incrementing test.fireSimulationAndWait(node1.getId()); + // success counter now goes up by 1 because fetchData completed + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(0.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); + // verify we get back the data node assertThat(test.reroute.get(), equalTo(2)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // total counter remains same because fetchdata does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(0.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); } public void testIgnoreFailureFromDifferentRound() throws Exception { @@ -198,6 +287,9 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because fetchData still ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // handle a failure with incorrect round id, wait on reroute incrementing test.processAsyncFetch( @@ -207,14 +299,30 @@ public void testIgnoreFailureFromDifferentRound() throws Exception { ); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(1)); + // success counter called with empty list + // failure counter goes up by 1 because of the failure + verify(asyncFetchSuccessCounter, times(1)).add(0.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); // fire a response, wait on reroute incrementing test.fireSimulationAndWait(node1.getId()); + // failure counter goes up by 1 because of the failure + verify(asyncFetchSuccessCounter, times(1)).add(0.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(2)).add(1.0); + verify(asyncFetchFailureCounter, times(2)).add(anyDouble()); // failure, fetched data exists, but has no data assertThat(test.reroute.get(), equalTo(2)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(0)); + // counters remain same because fetchData does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(1)).add(0.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(2)).add(1.0); + verify(asyncFetchFailureCounter, times(2)).add(anyDouble()); } public void testTwoNodesOnSetup() throws Exception { @@ -226,16 +334,32 @@ public void testTwoNodesOnSetup() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because fetch ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); 
+ verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the first response, it should trigger a reroute test.fireSimulationAndWait(node1.getId()); + // counter 1 because one fetch completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); + // there is still another on going request, so no data assertThat(test.getNumberOfInFlightFetches(), equalTo(1)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); + // counter still 1 because fetchData did not trigger new async fetch + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); + // counter 2 because 2 fetches completed + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // no more ongoing requests, we should fetch the data assertThat(test.reroute.get(), equalTo(2)); fetchData = test.fetchData(nodes, emptyMap()); @@ -243,6 +367,10 @@ public void testTwoNodesOnSetup() throws Exception { assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); assertThat(fetchData.getData().get(node2), sameInstance(response2)); + // counter still 2 because fetchData call did not trigger new async fetch + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); } public void testTwoNodesOnSetupAndFailure() throws Exception { @@ -254,34 +382,59 @@ public void testTwoNodesOnSetupAndFailure() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because both fetches ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the first response, it should trigger a reroute test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(1)); fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); + // counter 1 because one fetch completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); + // failure counter up by 1 because one fetch failed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); assertThat(test.reroute.get(), equalTo(2)); + // since one of those failed, we should only have one entry fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // success and failure 
counters same because fetchData did not trigger new async fetch + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(1)).add(1.0); + verify(asyncFetchFailureCounter, times(1)).add(anyDouble()); } public void testTwoNodesAddedInBetween() throws Exception { DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); - // no fetched data, 2 requests still on going + // no fetched data, request still on going AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because both fetches ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the first response, it should trigger a reroute test.fireSimulationAndWait(node1.getId()); + // counter 1 because fetch completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // now, add a second node to the nodes, it should add it to the ongoing requests nodes = DiscoveryNodes.builder(nodes).add(node2).build(); @@ -289,16 +442,28 @@ public void testTwoNodesAddedInBetween() throws Exception { // no fetch data, has a new node introduced fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); + // counter still 1 because second fetch ongoing + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // fire the second simulation, this should allow us to get the data test.fireSimulationAndWait(node2.getId()); + // counter now 2 because 2 fetches completed + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); - // since one of those failed, we should only have one entry + // since both succeeded, we should have 2 entries fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(2)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); assertThat(fetchData.getData().get(node2), sameInstance(response2)); + // counter still 2 because fetchData did not trigger new async fetch + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); } public void testClearCache() throws Exception { @@ -312,21 +477,36 @@ public void testClearCache() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because fetch ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(1)); + // counter 1 because 1 fetch completed + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, 
times(0)).add(anyDouble()); // verify we get back right data from node fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // counter still 1 because a new fetch is not called + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // second fetch gets same data fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1)); + // counter still 1 because a new fetch is not called + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); test.clearCacheForNode(node1.getId()); @@ -336,15 +516,27 @@ public void testClearCache() throws Exception { // no fetched data, new request on going fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); + // counter still 1 because new fetch is still ongoing + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); + // counter now 2 because second fetch completed + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // verify we get new data back fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); + // counter still 2 because fetchData did not trigger new async fetch + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); } public void testConcurrentRequestAndClearCache() throws Exception { @@ -355,12 +547,19 @@ public void testConcurrentRequestAndClearCache() throws Exception { AsyncShardFetch.FetchResult fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(false)); assertThat(test.reroute.get(), equalTo(0)); + // counter 0 because fetch ongoing + verify(asyncFetchSuccessCounter, times(0)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // clear cache while request is still on going, before it is processed test.clearCacheForNode(node1.getId()); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(1)); + // counter 1 because fetch completed, even though cache was wiped + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // prepare next request test.addSimulation(node1.getId(), response1_2); @@ -368,15 +567,27 @@ public void testConcurrentRequestAndClearCache() throws Exception { // verify still no fetched data, request still on going fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), 
equalTo(false)); + // counter unchanged because fetch ongoing + verify(asyncFetchSuccessCounter, times(1)).add(1.0); + verify(asyncFetchSuccessCounter, times(1)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); test.fireSimulationAndWait(node1.getId()); assertThat(test.reroute.get(), equalTo(2)); + // counter 2 because second fetch completed + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); // verify we get new data back fetchData = test.fetchData(nodes, emptyMap()); assertThat(fetchData.hasData(), equalTo(true)); assertThat(fetchData.getData().size(), equalTo(1)); assertThat(fetchData.getData().get(node1), sameInstance(response1_2)); + // counter unchanged because fetchData does not trigger new async fetch + verify(asyncFetchSuccessCounter, times(2)).add(1.0); + verify(asyncFetchSuccessCounter, times(2)).add(anyDouble()); + verify(asyncFetchFailureCounter, times(0)).add(anyDouble()); } @@ -398,8 +609,15 @@ static class Entry { private final Map simulations = new ConcurrentHashMap<>(); private AtomicInteger reroute = new AtomicInteger(); - TestFetch(ThreadPool threadPool) { - super(LogManager.getLogger(TestFetch.class), "test", new ShardId("test", "_na_", 1), "", null); + TestFetch(ThreadPool threadPool, MetricsRegistry metricsRegistry) { + super( + LogManager.getLogger(TestFetch.class), + "test", + new ShardId("test", "_na_", 1), + "", + null, + new ClusterManagerMetrics(metricsRegistry) + ); this.threadPool = threadPool; } diff --git a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java index 5ea5241762753..efdb3076f419c 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -1244,14 +1244,72 @@ public void testGatewayForRemoteStateForInitialBootstrapBlocksApplied() throws I } } - private MockGatewayMetaState newGatewayForRemoteState( + public void testGatewayMetaStateRemoteStateDownloadRetries() throws IOException { + MockGatewayMetaState gateway = null; + MockGatewayMetaState gatewayMetaStateSpy = null; + try { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenThrow( + new IllegalStateException("unable to download cluster state") + ).thenReturn(RemoteRestoreResult.build("test-cluster-uuid", null, ClusterState.EMPTY_STATE)); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = initializeGatewayForRemoteState(true); + gatewayMetaStateSpy = Mockito.spy(gateway); + startGatewayForRemoteState( + gatewayMetaStateSpy, + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE + ); + verify(gatewayMetaStateSpy, times(2)).restoreClusterState(Mockito.any(), Mockito.any(), Mockito.any()); + } finally { + IOUtils.close(gatewayMetaStateSpy); + } + } + + public void testGatewayMetaStateRemoteStateDownloadFailure() throws IOException { + MockGatewayMetaState gateway = 
null; + final MockGatewayMetaState gatewayMetaStateSpy; + try { + RemoteClusterStateService remoteClusterStateService = mock(RemoteClusterStateService.class); + when(remoteClusterStateService.getLastKnownUUIDFromRemote("test-cluster")).thenReturn("test-cluster-uuid"); + RemoteStoreRestoreService remoteStoreRestoreService = mock(RemoteStoreRestoreService.class); + when(remoteStoreRestoreService.restore(any(), any(), anyBoolean(), any())).thenThrow( + new IllegalStateException("unable to download cluster state") + ); + final PersistedStateRegistry persistedStateRegistry = persistedStateRegistry(); + gateway = initializeGatewayForRemoteState(true); + gatewayMetaStateSpy = Mockito.spy(gateway); + assertThrows( + Error.class, + () -> startGatewayForRemoteState( + gatewayMetaStateSpy, + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + ClusterState.EMPTY_STATE + ) + ); + verify(gatewayMetaStateSpy, times(5)).restoreClusterState(Mockito.any(), Mockito.any(), Mockito.any()); + } finally { + IOUtils.close(gateway); + } + } + + private MockGatewayMetaState initializeGatewayForRemoteState(boolean prepareFullState) { + return new MockGatewayMetaState(localNode, bigArrays, prepareFullState); + } + + private MockGatewayMetaState startGatewayForRemoteState( + MockGatewayMetaState gateway, RemoteClusterStateService remoteClusterStateService, RemoteStoreRestoreService remoteStoreRestoreService, PersistedStateRegistry persistedStateRegistry, - ClusterState currentState, - boolean prepareFullState + ClusterState currentState ) throws IOException { - MockGatewayMetaState gateway = new MockGatewayMetaState(localNode, bigArrays, prepareFullState); String randomRepoName = "randomRepoName"; String stateRepoTypeAttributeKey = String.format( Locale.getDefault(), @@ -1305,6 +1363,24 @@ private MockGatewayMetaState newGatewayForRemoteState( return gateway; } + private MockGatewayMetaState newGatewayForRemoteState( + RemoteClusterStateService remoteClusterStateService, + RemoteStoreRestoreService remoteStoreRestoreService, + PersistedStateRegistry persistedStateRegistry, + ClusterState currentState, + boolean prepareFullState + ) throws IOException { + MockGatewayMetaState gatewayMetaState = initializeGatewayForRemoteState(prepareFullState); + startGatewayForRemoteState( + gatewayMetaState, + remoteClusterStateService, + remoteStoreRestoreService, + persistedStateRegistry, + currentState + ); + return gatewayMetaState; + } + private static BigArrays getBigArrays() { return usually() ? 
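These two tests pin a bounded-retry behaviour for the remote cluster state download: one transient failure is followed by a second, successful restore attempt (times(2)), while a persistent failure is retried five times and then escalated as an Error. A rough sketch of that pattern under those assumptions; the attempt limit and error type are taken from the assertions, not from the production code itself:

```java
import java.util.function.Supplier;

// Bounded retry with escalation, mirroring what the two tests above observe via Mockito.
final class BoundedRetrySketch {
    static <T> T retry(Supplier<T> attempt, int maxAttempts) {
        RuntimeException last = null;
        for (int i = 0; i < maxAttempts; i++) {
            try {
                return attempt.get();
            } catch (RuntimeException e) {
                last = e; // transient failure, try again
            }
        }
        throw new Error("remote cluster state download failed after " + maxAttempts + " attempts", last);
    }
}
```
With the mocks above, restore() throws once and then succeeds, so two attempts are observed; when it always throws, five attempts are observed and the failure surfaces as an Error, matching assertThrows(Error.class, ...).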
BigArrays.NON_RECYCLING_INSTANCE diff --git a/server/src/test/java/org/opensearch/gateway/ShardBatchCacheTests.java b/server/src/test/java/org/opensearch/gateway/ShardBatchCacheTests.java index 12030ad41d508..39c4ee8c8ca06 100644 --- a/server/src/test/java/org/opensearch/gateway/ShardBatchCacheTests.java +++ b/server/src/test/java/org/opensearch/gateway/ShardBatchCacheTests.java @@ -8,6 +8,7 @@ package org.opensearch.gateway; +import org.opensearch.cluster.ClusterManagerMetrics; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; @@ -19,6 +20,7 @@ import org.opensearch.gateway.TransportNodesGatewayStartedShardHelper.GatewayStartedShard; import org.opensearch.gateway.TransportNodesListGatewayStartedShardsBatch.NodeGatewayStartedShardsBatch; import org.opensearch.indices.store.ShardAttributes; +import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import java.util.ArrayList; import java.util.HashMap; @@ -52,7 +54,8 @@ public void setupShardBatchCache(String batchId, int numberOfShards) { GatewayStartedShard.class, new GatewayStartedShard(null, false, null, null), GatewayStartedShard::isEmpty, - new ShardBatchResponseFactory<>(true) + new ShardBatchResponseFactory<>(true), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java index 3f9aa1245cab3..09c2933680be3 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterMetadataManifestTests.java @@ -34,6 +34,9 @@ import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; import org.opensearch.test.EqualsHashCodeTestUtils; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; import java.io.IOException; import java.util.ArrayList; @@ -64,6 +67,14 @@ public class ClusterMetadataManifestTests extends OpenSearchTestCase { + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @After + public void teardown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + public void testClusterMetadataManifestXContentV0() throws IOException { UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path", CODEC_V0); ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() @@ -214,7 +225,7 @@ public void testClusterMetadataManifestSerializationEqualsHashCode() { "indicesRoutingDiffPath" ) ) - .checksum(new ClusterStateChecksum(createClusterState())) + .checksum(new ClusterStateChecksum(createClusterState(), threadPool)) .build(); { // Mutate Cluster Term EqualsHashCodeTestUtils.checkEqualsAndHashCode( @@ -647,7 +658,7 @@ public void testClusterMetadataManifestXContentV4() throws IOException { UploadedIndexMetadata uploadedIndexMetadata = new UploadedIndexMetadata("test-index", "test-uuid", "/test/upload/path"); UploadedMetadataAttribute uploadedMetadataAttribute = new UploadedMetadataAttribute("attribute_name", "testing_attribute"); final StringKeyDiffProvider routingTableIncrementalDiff = Mockito.mock(StringKeyDiffProvider.class); - ClusterStateChecksum checksum = new 
ClusterStateChecksum(createClusterState()); + ClusterStateChecksum checksum = new ClusterStateChecksum(createClusterState(), threadPool); ClusterMetadataManifest originalManifest = ClusterMetadataManifest.builder() .clusterTerm(1L) .stateVersion(1L) diff --git a/server/src/test/java/org/opensearch/gateway/remote/ClusterStateChecksumTests.java b/server/src/test/java/org/opensearch/gateway/remote/ClusterStateChecksumTests.java index 0203e56dd2d5c..9b98187053a39 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/ClusterStateChecksumTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/ClusterStateChecksumTests.java @@ -34,6 +34,9 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; import java.io.IOException; import java.util.EnumSet; @@ -41,14 +44,21 @@ import java.util.Map; public class ClusterStateChecksumTests extends OpenSearchTestCase { + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @After + public void teardown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } public void testClusterStateChecksumEmptyClusterState() { - ClusterStateChecksum checksum = new ClusterStateChecksum(ClusterState.EMPTY_STATE); + ClusterStateChecksum checksum = new ClusterStateChecksum(ClusterState.EMPTY_STATE, threadPool); assertNotNull(checksum); } public void testClusterStateChecksum() { - ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState()); + ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState(), threadPool); assertNotNull(checksum); assertTrue(checksum.routingTableChecksum != 0); assertTrue(checksum.nodesChecksum != 0); @@ -65,8 +75,8 @@ public void testClusterStateChecksum() { } public void testClusterStateMatchChecksum() { - ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState()); - ClusterStateChecksum newChecksum = new ClusterStateChecksum(generateClusterState()); + ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState(), threadPool); + ClusterStateChecksum newChecksum = new ClusterStateChecksum(generateClusterState(), threadPool); assertNotNull(checksum); assertNotNull(newChecksum); assertEquals(checksum.routingTableChecksum, newChecksum.routingTableChecksum); @@ -84,7 +94,7 @@ public void testClusterStateMatchChecksum() { } public void testXContentConversion() throws IOException { - ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState()); + ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState(), threadPool); final XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); checksum.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -97,7 +107,7 @@ public void testXContentConversion() throws IOException { } public void testSerialization() throws IOException { - ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState()); + ClusterStateChecksum checksum = new ClusterStateChecksum(generateClusterState(), threadPool); BytesStreamOutput output = new BytesStreamOutput(); checksum.writeTo(output); @@ -109,10 +119,10 @@ public void testSerialization() throws IOException { public void testGetMismatchEntities() { ClusterState clsState1 = generateClusterState(); - ClusterStateChecksum checksum = new 
ClusterStateChecksum(clsState1); + ClusterStateChecksum checksum = new ClusterStateChecksum(clsState1, threadPool); assertTrue(checksum.getMismatchEntities(checksum).isEmpty()); - ClusterStateChecksum checksum2 = new ClusterStateChecksum(clsState1); + ClusterStateChecksum checksum2 = new ClusterStateChecksum(clsState1, threadPool); assertTrue(checksum.getMismatchEntities(checksum2).isEmpty()); ClusterState clsState2 = ClusterState.builder(ClusterName.DEFAULT) @@ -122,7 +132,7 @@ public void testGetMismatchEntities() { .customs(Map.of()) .metadata(Metadata.EMPTY_METADATA) .build(); - ClusterStateChecksum checksum3 = new ClusterStateChecksum(clsState2); + ClusterStateChecksum checksum3 = new ClusterStateChecksum(clsState2, threadPool); List mismatches = checksum.getMismatchEntities(checksum3); assertFalse(mismatches.isEmpty()); assertEquals(11, mismatches.size()); @@ -151,8 +161,8 @@ public void testGetMismatchEntitiesUnorderedInput() { ClusterState state2 = ClusterState.builder(state1).nodes(nodes1).build(); ClusterState state3 = ClusterState.builder(state1).nodes(nodes2).build(); - ClusterStateChecksum checksum1 = new ClusterStateChecksum(state2); - ClusterStateChecksum checksum2 = new ClusterStateChecksum(state3); + ClusterStateChecksum checksum1 = new ClusterStateChecksum(state2, threadPool); + ClusterStateChecksum checksum2 = new ClusterStateChecksum(state3, threadPool); assertEquals(checksum2, checksum1); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index 56857285fa8d3..35a8ae16cacf7 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -3123,7 +3123,7 @@ public void testWriteFullMetadataSuccessWithChecksumValidationEnabled() throws I .previousClusterUUID("prev-cluster-uuid") .routingTableVersion(1L) .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) - .checksum(new ClusterStateChecksum(clusterState)) + .checksum(new ClusterStateChecksum(clusterState, threadPool)) .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -3193,7 +3193,7 @@ public void testWriteIncrementalMetadataSuccessWithChecksumValidationEnabled() t final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indices(Collections.emptyList()) - .checksum(new ClusterStateChecksum(clusterState)) + .checksum(new ClusterStateChecksum(clusterState, threadPool)) .build(); when((blobStoreRepository.basePath())).thenReturn(BlobPath.cleanPath().add("base-path")); @@ -3219,7 +3219,7 @@ public void testWriteIncrementalMetadataSuccessWithChecksumValidationEnabled() t .previousClusterUUID("prev-cluster-uuid") .routingTableVersion(1) .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) - .checksum(new ClusterStateChecksum(clusterState)) + .checksum(new ClusterStateChecksum(clusterState, threadPool)) .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -3245,7 +3245,7 @@ public void testWriteIncrementalMetadataSuccessWithChecksumValidationModeNone() final ClusterMetadataManifest previousManifest = ClusterMetadataManifest.builder() .indices(Collections.emptyList()) - .checksum(new ClusterStateChecksum(clusterState)) + .checksum(new ClusterStateChecksum(clusterState, threadPool)) .build(); when((blobStoreRepository.basePath())).thenReturn(BlobPath.cleanPath().add("base-path")); @@ -3271,7 +3271,7 @@ public 
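ClusterStateChecksum now takes a ThreadPool, presumably so the per-component checksums (routing table, nodes, blocks, and so on) can be computed concurrently rather than serially. A hypothetical illustration of that idea only; the component tasks and the xor combination below are placeholders, not the real implementation:

```java
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class ParallelChecksumSketch {
    // Run each component checksum on the executor and fold the results together.
    static long combine(List<Callable<Long>> componentChecksums, ExecutorService executor) throws Exception {
        long combined = 0L;
        for (Future<Long> f : executor.invokeAll(componentChecksums)) {
            combined ^= f.get();
        }
        return combined;
    }

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        try {
            // Stand-ins for routing table, nodes and blocks checksums.
            List<Callable<Long>> components = List.of(() -> 11L, () -> 42L, () -> 7L);
            System.out.println(combine(components, executor));
        } finally {
            executor.shutdown();
        }
    }
}
```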
void testWriteIncrementalMetadataSuccessWithChecksumValidationModeNone() .previousClusterUUID("prev-cluster-uuid") .routingTableVersion(1) .indicesRouting(List.of(uploadedIndiceRoutingMetadata)) - .checksum(new ClusterStateChecksum(clusterState)) + .checksum(new ClusterStateChecksum(clusterState, threadPool)) .build(); assertThat(manifest.getIndices().size(), is(1)); @@ -3349,7 +3349,7 @@ public void testGetClusterStateForManifestWithChecksumValidationEnabled() throws initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).build(); remoteClusterStateService.start(); RemoteClusterStateService mockService = spy(remoteClusterStateService); @@ -3382,7 +3382,7 @@ public void testGetClusterStateForManifestWithChecksumValidationModeNone() throw initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.NONE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).build(); remoteClusterStateService.start(); RemoteClusterStateService mockService = spy(remoteClusterStateService); @@ -3415,7 +3415,7 @@ public void testGetClusterStateForManifestWithChecksumValidationEnabledWithMisma initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).build(); remoteClusterStateService.start(); RemoteClusterStateService mockService = spy(remoteClusterStateService); @@ -3465,7 +3465,7 @@ public void testGetClusterStateForManifestWithChecksumValidationDebugWithMismatc ); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).build(); remoteClusterStateService.start(); RemoteClusterStateService mockService = spy(remoteClusterStateService); @@ -3505,7 +3505,7 @@ public void testGetClusterStateUsingDiffWithChecksum() throws IOException { initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).diffManifest(ClusterStateDiffManifest.builder().build()).build(); remoteClusterStateService.start(); @@ -3547,7 +3547,7 @@ public void testGetClusterStateUsingDiffWithChecksumModeNone() throws IOExceptio initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.NONE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = 
generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).diffManifest(ClusterStateDiffManifest.builder().build()).build(); remoteClusterStateService.start(); @@ -3589,7 +3589,7 @@ public void testGetClusterStateUsingDiffWithChecksumModeDebugMismatch() throws I initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.DEBUG); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).diffManifest(ClusterStateDiffManifest.builder().build()).build(); remoteClusterStateService.start(); @@ -3630,7 +3630,7 @@ public void testGetClusterStateUsingDiffWithChecksumModeTraceMismatch() throws I initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.TRACE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).diffManifest(ClusterStateDiffManifest.builder().build()).build(); remoteClusterStateService.start(); @@ -3692,7 +3692,7 @@ public void testGetClusterStateUsingDiffWithChecksumMismatch() throws IOExceptio initializeWithChecksumEnabled(RemoteClusterStateService.RemoteClusterStateValidationMode.FAILURE); ClusterState clusterState = generateClusterStateWithAllAttributes().build(); ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().checksum( - new ClusterStateChecksum(clusterState) + new ClusterStateChecksum(clusterState, threadPool) ).diffManifest(ClusterStateDiffManifest.builder().build()).build(); remoteClusterStateService.start(); diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index ecd6620dbea15..df3df81361a12 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -565,7 +565,8 @@ public void testCleanupAsync() throws Exception { repositoryName, indexUUID, shardId, - pathStrategy + pathStrategy, + false ); verify(remoteSegmentStoreDirectoryFactory).newDirectory(repositoryName, indexUUID, shardId, pathStrategy); verify(threadPool, times(0)).executor(ThreadPool.Names.REMOTE_PURGE); diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java index 4ec68f7fb75b4..e6871414cf5e0 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTimestampAwareTranslogTests.java @@ -214,6 +214,7 @@ public void onFailure(Exception e) { // Old format metadata file String oldFormatMdFilename = "metadata__9223372036438563903__9223372036854774799__9223370311919910393__31__1"; assertNull(TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(oldFormatMdFilename)); + assertEquals(Long.MAX_VALUE - 9223372036854774799L, 
TranslogTransferMetadata.getMaxGenerationFromFileName(oldFormatMdFilename)); // Node id containing separator String nodeIdWithSeparator = @@ -221,10 +222,14 @@ public void onFailure(Exception e) { Tuple minMaxGen = TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(nodeIdWithSeparator); Long minGen = Long.MAX_VALUE - 9223372036438563958L; assertEquals(minGen, minMaxGen.v1()); + Long maxGen = Long.MAX_VALUE - 9223372036854774799L; + assertEquals(maxGen, minMaxGen.v2()); + assertEquals(Long.MAX_VALUE - 9223372036854774799L, TranslogTransferMetadata.getMaxGenerationFromFileName(nodeIdWithSeparator)); // Malformed md filename String malformedMdFileName = "metadata__9223372036438563903__9223372036854774799__9223370311919910393__node1__xyz__3__1"; assertNull(TranslogTransferMetadata.getMinMaxTranslogGenerationFromFilename(malformedMdFileName)); + assertEquals(Long.MAX_VALUE - 9223372036854774799L, TranslogTransferMetadata.getMaxGenerationFromFileName(malformedMdFileName)); } public void testGetMinMaxPrimaryTermFromFilename() throws Exception { @@ -330,43 +335,60 @@ public void testSimpleOperationsUpload() throws Exception { addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 2, primaryTerm.get(), new byte[] { 1 })); addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 3, primaryTerm.get(), new byte[] { 1 })); - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 4, primaryTerm.get(), new byte[] { 1 })); - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("5", 5, primaryTerm.get(), new byte[] { 1 })); - addToTranslogAndListAndUpload(translog, ops, new Translog.Index("6", 6, primaryTerm.get(), new byte[] { 1 })); assertBusy(() -> { assertEquals( - 16, + 10, blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() ); }); - assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); - RemoteStoreSettings.setPinnedTimestampsLookbackInterval(TimeValue.ZERO); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); // Fetch pinned timestamps so that it won't be stale updatePinnedTimstampTask.run(); + translog.setMinSeqNoToKeep(3); + translog.trimUnreferencedReaders(); - translog.setMinSeqNoToKeep(4); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("4", 4, primaryTerm.get(), new byte[] { 1 })); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("5", 5, primaryTerm.get(), new byte[] { 1 })); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("6", 6, primaryTerm.get(), new byte[] { 1 })); + + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + // Fetch pinned timestamps so that it won't be stale + updatePinnedTimstampTask.run(); + translog.setMinSeqNoToKeep(6); translog.trimUnreferencedReaders(); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + + assertEquals(1, translog.readers.size()); + assertBusy(() -> { + assertEquals(2, translog.allUploaded().size()); + assertEquals(4, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); + assertEquals( + 16, + blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() + ); + }, 30, TimeUnit.SECONDS); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("7", 7, primaryTerm.get(), new byte[] { 1 })); addToTranslogAndListAndUpload(translog, ops, new 
Translog.Index("8", 8, primaryTerm.get(), new byte[] { 1 })); - assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); + assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); // Fetch pinned timestamps so that it won't be stale updatePinnedTimstampTask.run(); translog.trimUnreferencedReaders(); - assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); - assertEquals(5, translog.readers.size()); + + assertEquals(3, translog.readers.size()); assertBusy(() -> { - assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); - assertEquals(10, translog.allUploaded().size()); + assertEquals(6, translog.allUploaded().size()); + assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); assertEquals( - 10, + 12, blobStoreTransferService.listAll(getTranslogDirectory().add(DATA_DIR).add(String.valueOf(primaryTerm.get()))).size() ); - }, 60, TimeUnit.SECONDS); + }, 30, TimeUnit.SECONDS); } @Override @@ -397,7 +419,7 @@ public void testMetadataFileDeletion() throws Exception { ); updatePinnedTimstampTask.run(); translog.trimUnreferencedReaders(); - assertBusy(() -> { assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); }); + assertBusy(() -> { assertEquals(3, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size()); }); } public void testMetadataFileDeletionWithPinnedTimestamps() throws Exception { @@ -568,7 +590,7 @@ public void testDrainSync() throws Exception { assertBusy(() -> assertTrue(translog.isRemoteGenerationDeletionPermitsAvailable())); assertEquals(1, translog.readers.size()); assertBusy(() -> assertEquals(2, translog.allUploaded().size())); - assertBusy(() -> assertEquals(1, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); + assertBusy(() -> assertEquals(2, blobStoreTransferService.listAll(getTranslogDirectory().add(METADATA_DIR)).size())); } @Override @@ -647,7 +669,7 @@ public void testGetGenerationsToBeDeletedEmptyMetadataFilesNotToBeDeleted() thro Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( metadataFilesNotToBeDeleted, metadataFilesToBeDeleted, - true + Long.MAX_VALUE ); Set md1Generations = LongStream.rangeClosed(4, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 37).boxed().collect(Collectors.toSet()); @@ -683,7 +705,7 @@ public void testGetGenerationsToBeDeleted() throws IOException { Set generations = ((RemoteFsTimestampAwareTranslog) translog).getGenerationsToBeDeleted( metadataFilesNotToBeDeleted, metadataFilesToBeDeleted, - true + Long.MAX_VALUE ); Set md1Generations = LongStream.rangeClosed(5, 7).boxed().collect(Collectors.toSet()); Set md2Generations = LongStream.rangeClosed(17, 25).boxed().collect(Collectors.toSet()); @@ -708,7 +730,10 @@ public void testGetMetadataFilesToBeDeletedNoExclusion() { "metadata__9223372036438563903__9223372036854775701__9223370311919910403__31__9223372036854775701__1" ); - assertEquals(metadataFiles, ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles)); + assertEquals( + metadataFiles, + RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted(metadataFiles, new HashMap<>(), Long.MAX_VALUE, false, logger) + ); } public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { @@ -724,7 +749,13 @@ public void 
testGetMetadataFilesToBeDeletedExclusionBasedOnAgeOnly() { "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + Long.MAX_VALUE, + false, + logger + ); assertEquals(1, metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); } @@ -746,7 +777,13 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnPinningOnly() throws "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + Long.MAX_VALUE, + false, + logger + ); assertEquals(2, metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(1)); @@ -769,11 +806,77 @@ public void testGetMetadataFilesToBeDeletedExclusionBasedOnAgeAndPinning() throw "metadata__9223372036438563903__9223372036854775701__" + md3Timestamp + "__31__9223372036854775701__1" ); - List metadataFilesToBeDeleted = ((RemoteFsTimestampAwareTranslog) translog).getMetadataFilesToBeDeleted(metadataFiles); + List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + Long.MAX_VALUE, + false, + logger + ); assertEquals(1, metadataFilesToBeDeleted.size()); assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(0)); } + public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationOnly() throws IOException { + long currentTimeInMillis = System.currentTimeMillis(); + String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); + String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 300000); + String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 600000); + + when(blobContainer.listBlobs()).thenReturn(Map.of()); + + updatePinnedTimstampTask.run(); + + List metadataFiles = List.of( + // MaxGen 7 + "metadata__9223372036438563903__9223372036854775800__" + md1Timestamp + "__31__9223372036854775106__1", + // MaxGen 12 + "metadata__9223372036438563903__9223372036854775795__" + md2Timestamp + "__31__9223372036854775803__1", + // MaxGen 10 + "metadata__9223372036438563903__9223372036854775798__" + md3Timestamp + "__31__9223372036854775701__1" + ); + + List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + 10L, + false, + logger + ); + assertEquals(2, metadataFilesToBeDeleted.size()); + assertEquals(metadataFiles.get(0), metadataFilesToBeDeleted.get(0)); + assertEquals(metadataFiles.get(2), metadataFilesToBeDeleted.get(1)); + } + + public void testGetMetadataFilesToBeDeletedExclusionBasedOnGenerationDeleteIndex() throws IOException { + long currentTimeInMillis = System.currentTimeMillis(); + String md1Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 200000); + String md2Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 300000); + String md3Timestamp = RemoteStoreUtils.invertLong(currentTimeInMillis - 
600000); + + when(blobContainer.listBlobs()).thenReturn(Map.of()); + + updatePinnedTimstampTask.run(); + + List metadataFiles = List.of( + // MaxGen 7 + "metadata__9223372036438563903__9223372036854775800__" + md1Timestamp + "__31__9223372036854775106__1", + // MaxGen 12 + "metadata__9223372036438563903__9223372036854775795__" + md2Timestamp + "__31__9223372036854775803__1", + // MaxGen 17 + "metadata__9223372036438563903__9223372036854775790__" + md3Timestamp + "__31__9223372036854775701__1" + ); + + List metadataFilesToBeDeleted = RemoteFsTimestampAwareTranslog.getMetadataFilesToBeDeleted( + metadataFiles, + new HashMap<>(), + 10L, + true, + logger + ); + assertEquals(metadataFiles, metadataFilesToBeDeleted); + } + public void testIsGenerationPinned() { TreeSet> pinnedGenerations = new TreeSet<>(new TreeSet<>((o1, o2) -> { if (Objects.equals(o1.v1(), o2.v1()) == false) { diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java index 339d876274557..03c77a9a83f57 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFsTranslogTests.java @@ -1801,6 +1801,83 @@ public void testDownloadWithEmptyTranslogOnlyInLocal() throws IOException { assertArrayEquals(filesPostFirstDownload, filesPostSecondDownload); } + public void testSyncWithGlobalCheckpointUpdate() throws IOException { + ArrayList ops = new ArrayList<>(); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("2", 1, primaryTerm.get(), new byte[] { 2 })); + + // Set a global checkpoint + long initialGlobalCheckpoint = 1L; + globalCheckpoint.set(initialGlobalCheckpoint); + + // Sync the translog + translog.sync(); + + // Verify that the globalCheckpointSynced is updated + assertEquals(initialGlobalCheckpoint, ((RemoteFsTranslog) translog).getLastSyncedCheckpoint().globalCheckpoint); + + // Update global checkpoint + long newGlobalCheckpoint = 2L; + globalCheckpoint.set(newGlobalCheckpoint); + + // Add a new operation and sync + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("3", 2, primaryTerm.get(), new byte[] { 3 })); + translog.sync(); + + // Verify that the globalCheckpointSynced is updated to the new value + assertEquals(newGlobalCheckpoint, ((RemoteFsTranslog) translog).getLastSyncedCheckpoint().globalCheckpoint); + } + + public void testSyncNeededWithGlobalCheckpointUpdate() throws IOException { + ArrayList ops = new ArrayList<>(); + addToTranslogAndListAndUpload(translog, ops, new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + // Set initial global checkpoint + long initialGlobalCheckpoint = 0L; + globalCheckpoint.set(initialGlobalCheckpoint); + + // Sync the translog + translog.sync(); + + // Verify that sync is not needed + assertFalse(translog.syncNeeded()); + + // Update global checkpoint + long newGlobalCheckpoint = 1L; + globalCheckpoint.set(newGlobalCheckpoint); + + // Verify that sync is now needed due to global checkpoint update + assertTrue(translog.syncNeeded()); + + // Sync again + translog.sync(); + + // Verify that sync is not needed after syncing + assertFalse(translog.syncNeeded()); + } + + public void testGlobalCheckpointUpdateDuringClose() throws IOException { + ArrayList ops = new ArrayList<>(); + addToTranslogAndListAndUpload(translog, ops, 
new Translog.Index("1", 0, primaryTerm.get(), new byte[] { 1 })); + + // Set initial global checkpoint + long initialGlobalCheckpoint = 0L; + globalCheckpoint.set(initialGlobalCheckpoint); + + // Sync the translog + translog.sync(); + + // Update global checkpoint + long newGlobalCheckpoint = 1L; + globalCheckpoint.set(newGlobalCheckpoint); + + // Close the translog + translog.close(); + + // Verify that the last synced checkpoint includes the updated global checkpoint + assertEquals(newGlobalCheckpoint, ((RemoteFsTranslog) translog).getLastSyncedCheckpoint().globalCheckpoint); + } + public class ThrowingBlobRepository extends FsRepository { private final Environment environment; diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java index 96b1c75371697..1d1b509ae94e5 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestIndicesActionTests.java @@ -47,9 +47,13 @@ import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexSettings; +import org.opensearch.rest.action.list.RestIndicesListAction; +import org.opensearch.rest.pagination.PageToken; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; +import org.junit.Before; +import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; @@ -63,13 +67,14 @@ public class RestIndicesActionTests extends OpenSearchTestCase { - public void testBuildTable() { - final int numIndices = randomIntBetween(3, 20); - final Map indicesSettings = new LinkedHashMap<>(); - final Map indicesMetadatas = new LinkedHashMap<>(); - final Map indicesHealths = new LinkedHashMap<>(); - final Map indicesStats = new LinkedHashMap<>(); + final Map indicesSettings = new LinkedHashMap<>(); + final Map indicesMetadatas = new LinkedHashMap<>(); + final Map indicesHealths = new LinkedHashMap<>(); + final Map indicesStats = new LinkedHashMap<>(); + @Before + public void setup() { + final int numIndices = randomIntBetween(3, 20); for (int i = 0; i < numIndices; i++) { String indexName = "index-" + i; @@ -136,11 +141,59 @@ public void testBuildTable() { } } } + } + public void testBuildTable() { final RestIndicesAction action = new RestIndicesAction(); - final Table table = action.buildTable(new FakeRestRequest(), indicesSettings, indicesHealths, indicesStats, indicesMetadatas); + final Table table = action.buildTable( + new FakeRestRequest(), + indicesSettings, + indicesHealths, + indicesStats, + indicesMetadatas, + action.getTableIterator(new String[0], indicesSettings), + null + ); // now, verify the table is correct + assertNotNull(table); + + assertTableHeaders(table); + + assertThat(table.getRows().size(), equalTo(indicesMetadatas.size())); + assertTableRows(table); + } + + public void testBuildPaginatedTable() { + final RestIndicesAction action = new RestIndicesAction(); + final RestIndicesListAction indicesListAction = new RestIndicesListAction(); + List indicesList = new ArrayList<>(indicesMetadatas.keySet()); + // Using half of the indices from metadata list for a page + String[] indicesToBeQueried = indicesList.subList(0, indicesMetadatas.size() / 2).toArray(new String[0]); + PageToken pageToken = new PageToken("foo", "indices"); + final Table table = action.buildTable( + new FakeRestRequest(), + 
indicesSettings, + indicesHealths, + indicesStats, + indicesMetadatas, + indicesListAction.getTableIterator(indicesToBeQueried, indicesSettings), + pageToken + ); + + // verifying table + assertNotNull(table); + assertTableHeaders(table); + assertNotNull(table.getPageToken()); + assertEquals(pageToken.getNextToken(), table.getPageToken().getNextToken()); + assertEquals(pageToken.getPaginatedEntity(), table.getPageToken().getPaginatedEntity()); + + // Table should only contain the indices present in indicesToBeQueried + assertThat(table.getRows().size(), equalTo(indicesMetadatas.size() / 2)); + assertTableRows(table); + } + + private void assertTableHeaders(Table table) { List headers = table.getHeaders(); assertThat(headers.get(0).value, equalTo("health")); assertThat(headers.get(1).value, equalTo("status")); @@ -148,9 +201,10 @@ public void testBuildTable() { assertThat(headers.get(3).value, equalTo("uuid")); assertThat(headers.get(4).value, equalTo("pri")); assertThat(headers.get(5).value, equalTo("rep")); + } + private void assertTableRows(Table table) { final List> rows = table.getRows(); - assertThat(rows.size(), equalTo(indicesMetadatas.size())); for (final List row : rows) { final String indexName = (String) row.get(2).value; diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java index 8183cb1d3b910..a82e563d70273 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestTableTests.java @@ -37,6 +37,7 @@ import org.opensearch.core.xcontent.MediaTypeRegistry; import org.opensearch.rest.AbstractRestChannel; import org.opensearch.rest.RestResponse; +import org.opensearch.rest.pagination.PageToken; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; import org.junit.Before; @@ -64,9 +65,14 @@ public class RestTableTests extends OpenSearchTestCase { private static final String ACCEPT = "Accept"; private static final String TEXT_PLAIN = "text/plain; charset=UTF-8"; private static final String TEXT_TABLE_BODY = "foo foo foo foo foo foo foo foo\n"; + private static final String PAGINATED_TEXT_TABLE_BODY = "foo foo foo foo foo foo foo foo\nnext_token foo\n"; private static final String JSON_TABLE_BODY = "[{\"bulk.foo\":\"foo\",\"bulk.bar\":\"foo\",\"aliasedBulk\":\"foo\"," + "\"aliasedSecondBulk\":\"foo\",\"unmatched\":\"foo\"," + "\"invalidAliasesBulk\":\"foo\",\"timestamp\":\"foo\",\"epoch\":\"foo\"}]"; + private static final String PAGINATED_JSON_TABLE_BODY = + "{\"next_token\":\"foo\",\"entities\":[{\"bulk.foo\":\"foo\",\"bulk.bar\":\"foo\",\"aliasedBulk\":\"foo\"," + + "\"aliasedSecondBulk\":\"foo\",\"unmatched\":\"foo\"," + + "\"invalidAliasesBulk\":\"foo\",\"timestamp\":\"foo\",\"epoch\":\"foo\"}]}"; private static final String YAML_TABLE_BODY = "---\n" + "- bulk.foo: \"foo\"\n" + " bulk.bar: \"foo\"\n" @@ -76,6 +82,17 @@ public class RestTableTests extends OpenSearchTestCase { + " invalidAliasesBulk: \"foo\"\n" + " timestamp: \"foo\"\n" + " epoch: \"foo\"\n"; + private static final String PAGINATED_YAML_TABLE_BODY = "---\n" + + "next_token: \"foo\"\n" + + "entities:\n" + + "- bulk.foo: \"foo\"\n" + + " bulk.bar: \"foo\"\n" + + " aliasedBulk: \"foo\"\n" + + " aliasedSecondBulk: \"foo\"\n" + + " unmatched: \"foo\"\n" + + " invalidAliasesBulk: \"foo\"\n" + + " timestamp: \"foo\"\n" + + " epoch: \"foo\"\n"; private Table table; private FakeRestRequest 
restRequest; @@ -83,20 +100,7 @@ public class RestTableTests extends OpenSearchTestCase { public void setup() { restRequest = new FakeRestRequest(); table = new Table(); - table.startHeaders(); - table.addCell("bulk.foo", "alias:f;desc:foo"); - table.addCell("bulk.bar", "alias:b;desc:bar"); - // should be matched as well due to the aliases - table.addCell("aliasedBulk", "alias:bulkWhatever;desc:bar"); - table.addCell("aliasedSecondBulk", "alias:foobar,bulkolicious,bulkotastic;desc:bar"); - // no match - table.addCell("unmatched", "alias:un.matched;desc:bar"); - // invalid alias - table.addCell("invalidAliasesBulk", "alias:,,,;desc:bar"); - // timestamp - table.addCell("timestamp", "alias:ts"); - table.addCell("epoch", "alias:t"); - table.endHeaders(); + addHeaders(table); } public void testThatDisplayHeadersSupportWildcards() throws Exception { @@ -121,10 +125,28 @@ public void testThatWeUseTheAcceptHeaderJson() throws Exception { assertResponse(Collections.singletonMap(ACCEPT, Collections.singletonList(APPLICATION_JSON)), APPLICATION_JSON, JSON_TABLE_BODY); } + public void testThatWeUseTheAcceptHeaderJsonForPaginatedTable() throws Exception { + assertResponse( + Collections.singletonMap(ACCEPT, Collections.singletonList(APPLICATION_JSON)), + APPLICATION_JSON, + PAGINATED_JSON_TABLE_BODY, + getPaginatedTable() + ); + } + public void testThatWeUseTheAcceptHeaderYaml() throws Exception { assertResponse(Collections.singletonMap(ACCEPT, Collections.singletonList(APPLICATION_YAML)), APPLICATION_YAML, YAML_TABLE_BODY); } + public void testThatWeUseTheAcceptHeaderYamlForPaginatedTable() throws Exception { + assertResponse( + Collections.singletonMap(ACCEPT, Collections.singletonList(APPLICATION_YAML)), + APPLICATION_YAML, + PAGINATED_YAML_TABLE_BODY, + getPaginatedTable() + ); + } + public void testThatWeUseTheAcceptHeaderSmile() throws Exception { assertResponseContentType(Collections.singletonMap(ACCEPT, Collections.singletonList(APPLICATION_SMILE)), APPLICATION_SMILE); } @@ -137,6 +159,15 @@ public void testThatWeUseTheAcceptHeaderText() throws Exception { assertResponse(Collections.singletonMap(ACCEPT, Collections.singletonList(TEXT_PLAIN)), TEXT_PLAIN, TEXT_TABLE_BODY); } + public void testThatWeUseTheAcceptHeaderTextForPaginatedTable() throws Exception { + assertResponse( + Collections.singletonMap(ACCEPT, Collections.singletonList(TEXT_PLAIN)), + TEXT_PLAIN, + PAGINATED_TEXT_TABLE_BODY, + getPaginatedTable() + ); + } + public void testIgnoreContentType() throws Exception { assertResponse(Collections.singletonMap(CONTENT_TYPE, Collections.singletonList(APPLICATION_JSON)), TEXT_PLAIN, TEXT_TABLE_BODY); } @@ -261,6 +292,10 @@ public void testMultiSort() { } private RestResponse assertResponseContentType(Map> headers, String mediaType) throws Exception { + return assertResponseContentType(headers, mediaType, table); + } + + private RestResponse assertResponseContentType(Map> headers, String mediaType, Table table) throws Exception { FakeRestRequest requestWithAcceptHeader = new FakeRestRequest.Builder(xContentRegistry()).withHeaders(headers).build(); table.startRow(); table.addCell("foo"); @@ -282,7 +317,11 @@ public void sendResponse(RestResponse response) {} } private void assertResponse(Map> headers, String mediaType, String body) throws Exception { - RestResponse response = assertResponseContentType(headers, mediaType); + assertResponse(headers, mediaType, body, table); + } + + private void assertResponse(Map> headers, String mediaType, String body, Table table) throws Exception { + 
RestResponse response = assertResponseContentType(headers, mediaType, table); assertThat(response.content().utf8ToString(), equalTo(body)); } @@ -294,4 +333,28 @@ private List getHeaderNames(List headers) { return headerNames; } + + private Table getPaginatedTable() { + PageToken pageToken = new PageToken("foo", "entities"); + Table paginatedTable = new Table(pageToken); + addHeaders(paginatedTable); + return paginatedTable; + } + + private void addHeaders(Table table) { + table.startHeaders(); + table.addCell("bulk.foo", "alias:f;desc:foo"); + table.addCell("bulk.bar", "alias:b;desc:bar"); + // should be matched as well due to the aliases + table.addCell("aliasedBulk", "alias:bulkWhatever;desc:bar"); + table.addCell("aliasedSecondBulk", "alias:foobar,bulkolicious,bulkotastic;desc:bar"); + // no match + table.addCell("unmatched", "alias:un.matched;desc:bar"); + // invalid alias + table.addCell("invalidAliasesBulk", "alias:,,,;desc:bar"); + // timestamp + table.addCell("timestamp", "alias:ts"); + table.addCell("epoch", "alias:t"); + table.endHeaders(); + } } diff --git a/server/src/test/java/org/opensearch/rest/pagination/IndexPaginationStrategyTests.java b/server/src/test/java/org/opensearch/rest/pagination/IndexPaginationStrategyTests.java new file mode 100644 index 0000000000000..01464b489e26e --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/pagination/IndexPaginationStrategyTests.java @@ -0,0 +1,399 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.pagination; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.routing.IndexRoutingTable; +import org.opensearch.cluster.routing.RoutingTable; +import org.opensearch.test.OpenSearchTestCase; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; +import static org.opensearch.rest.pagination.PageParams.PARAM_ASC_SORT_VALUE; +import static org.opensearch.rest.pagination.PageParams.PARAM_DESC_SORT_VALUE; +import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; + +public class IndexPaginationStrategyTests extends OpenSearchTestCase { + + public void testRetrieveAllIndicesWithVaryingPageSize() { + List indexNumberList = new ArrayList<>(); + final int totalIndices = 100; + for (int indexNumber = 1; indexNumber <= 100; indexNumber++) { + indexNumberList.add(indexNumber); + } + // creating a cluster state with 100 indices + Collections.shuffle(indexNumberList, getRandom()); + ClusterState clusterState = getRandomClusterState(indexNumberList); + + // Checking pagination response for different pageSizes, which has a mix of even and odd numbers + // to ensure number of indices in last page is not always equal to pageSize. 
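+            // For reference only: the assertions below reduce to the consumer-side paging loop, i.e. keep
+            // feeding the returned token back in until it comes back null. A minimal sketch reusing this
+            // test's own names (clusterState, the PARAM_*_SORT_VALUE constants), not a production client:
+            //   String token = null;
+            //   do {
+            //       IndexPaginationStrategy page =
+            //           new IndexPaginationStrategy(new PageParams(token, PARAM_ASC_SORT_VALUE, 10), clusterState);
+            //       List<String> indicesOnPage = page.getRequestedEntities();
+            //       token = page.getResponseToken().getNextToken(); // null once the last page has been served
+            //   } while (token != null);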
+ List pageSizeList = List.of(1, 6, 10, 13); + List sortOrderList = List.of(PARAM_ASC_SORT_VALUE, PARAM_DESC_SORT_VALUE); + for (String sortOrder : sortOrderList) { + for (int pageSize : pageSizeList) { + String requestedToken = null; + int totalPagesToFetch = (int) Math.ceil(totalIndices / (pageSize * 1.0)); + int indicesRemaining = totalIndices; + for (int pageNumber = 1; pageNumber <= totalPagesToFetch; pageNumber++) { + PageParams pageParams = new PageParams(requestedToken, sortOrder, pageSize); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + if (pageNumber < totalPagesToFetch) { + assertNotNull(paginationStrategy.getResponseToken().getNextToken()); + } else { + assertNull(paginationStrategy.getResponseToken().getNextToken()); + } + requestedToken = paginationStrategy.getResponseToken().getNextToken(); + // Asserting all the indices received + int responseItr = 0; + if (PARAM_ASC_SORT_VALUE.equals(sortOrder)) { + for (int indexNumber = (pageNumber - 1) * pageSize; indexNumber < Math.min( + 100, + pageNumber * pageSize + ); indexNumber++) { + assertEquals("test-index-" + (indexNumber + 1), paginationStrategy.getRequestedEntities().get(responseItr)); + responseItr++; + } + } else { + int endIndexNumberForPage = Math.max(indicesRemaining - pageSize, 0); + for (; indicesRemaining > endIndexNumberForPage; indicesRemaining--) { + assertEquals("test-index-" + indicesRemaining, paginationStrategy.getRequestedEntities().get(responseItr)); + responseItr++; + } + } + assertEquals(responseItr, paginationStrategy.getRequestedEntities().size()); + } + } + } + } + + public void testRetrieveAllIndicesInAscOrderWhileIndicesGetCreatedAndDeleted() { + List indexNumberList = new ArrayList<>(); + List deletedIndices = new ArrayList<>(); + final int totalIndices = 100; + final int numIndicesToDelete = 10; + final int numIndicesToCreate = 5; + List indicesFetched = new ArrayList<>(); + for (int indexNumber = 1; indexNumber <= 100; indexNumber++) { + indexNumberList.add(indexNumber); + } + ClusterState clusterState = getRandomClusterState(indexNumberList); + + int pageSize = 6; + String requestedToken = null; + int numPages = 0; + do { + numPages++; + PageParams pageParams = new PageParams(requestedToken, PARAM_ASC_SORT_VALUE, pageSize); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertNotNull(paginationStrategy); + assertNotNull(paginationStrategy.getResponseToken()); + requestedToken = paginationStrategy.getResponseToken().getNextToken(); + // randomly deleting 10 indices after 3rd call + if (numPages == 3) { + deletedIndices = indexNumberList.subList(20, indexNumberList.size()); + Collections.shuffle(deletedIndices, getRandom()); + for (int pos = 0; pos < numIndicesToDelete; pos++) { + clusterState = deleteIndexFromClusterState(clusterState, deletedIndices.get(pos)); + } + } + // creating 5 indices after 5th call + if (numPages == 5) { + for (int indexNumber = totalIndices + 1; indexNumber <= totalIndices + numIndicesToCreate; indexNumber++) { + clusterState = addIndexToClusterState(clusterState, indexNumber); + } + } + if (requestedToken == null) { + assertEquals(paginationStrategy.getRequestedEntities().size(), 5); + } else { + assertEquals(paginationStrategy.getRequestedEntities().size(), pageSize); + } + + indicesFetched.addAll(paginationStrategy.getRequestedEntities()); + } while (Objects.nonNull(requestedToken)); + + assertEquals((int) Math.ceil((double) (totalIndices + 
numIndicesToCreate - numIndicesToDelete) / pageSize), numPages); + assertEquals(totalIndices + numIndicesToCreate - numIndicesToDelete, indicesFetched.size()); + + // none of the deleted index should appear in the list of fetched indices + for (int deletedIndexPos = 0; deletedIndexPos < numIndicesToDelete; deletedIndexPos++) { + assertFalse(indicesFetched.contains("test-index-" + deletedIndices.get(deletedIndexPos))); + } + + // all the newly created indices should be present in the list of fetched indices + for (int indexNumber = totalIndices + 1; indexNumber <= totalIndices + numIndicesToCreate; indexNumber++) { + assertTrue(indicesFetched.contains("test-index-" + indexNumber)); + } + } + + public void testRetrieveAllIndicesInDescOrderWhileIndicesGetCreatedAndDeleted() { + List indexNumberList = new ArrayList<>(); + List deletedIndices = new ArrayList<>(); + final int totalIndices = 100; + final int numIndicesToDelete = 9; + final int numIndicesToCreate = 5; + List indicesFetched = new ArrayList<>(); + for (int indexNumber = 1; indexNumber <= 100; indexNumber++) { + indexNumberList.add(indexNumber); + } + ClusterState clusterState = getRandomClusterState(indexNumberList); + + int pageSize = 6; + String requestedToken = null; + int numPages = 0; + do { + numPages++; + PageParams pageParams = new PageParams(requestedToken, PARAM_DESC_SORT_VALUE, pageSize); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertNotNull(paginationStrategy); + assertNotNull(paginationStrategy.getResponseToken()); + requestedToken = paginationStrategy.getResponseToken().getNextToken(); + // randomly deleting 10 indices after 3rd call + if (numPages == 3) { + deletedIndices = indexNumberList.subList(0, 80); + Collections.shuffle(deletedIndices, getRandom()); + for (int pos = 0; pos < numIndicesToDelete; pos++) { + clusterState = deleteIndexFromClusterState(clusterState, deletedIndices.get(pos)); + } + } + // creating 5 indices after 5th call + if (numPages == 5) { + for (int indexNumber = totalIndices + 1; indexNumber <= totalIndices + numIndicesToCreate; indexNumber++) { + clusterState = addIndexToClusterState(clusterState, indexNumber); + } + } + if (requestedToken == null) { + assertEquals(paginationStrategy.getRequestedEntities().size(), (totalIndices - numIndicesToDelete) % pageSize); + } else { + assertEquals(paginationStrategy.getRequestedEntities().size(), pageSize); + } + + indicesFetched.addAll(paginationStrategy.getRequestedEntities()); + } while (Objects.nonNull(requestedToken)); + + assertEquals((int) Math.ceil((double) (totalIndices - numIndicesToDelete) / pageSize), numPages); + assertEquals(totalIndices - numIndicesToDelete, indicesFetched.size()); + + // none of the deleted index should appear in the list of fetched indices + for (int deletedIndexPos = 0; deletedIndexPos < numIndicesToDelete; deletedIndexPos++) { + assertFalse(indicesFetched.contains("test-index-" + deletedIndices.get(deletedIndexPos))); + } + + // none of the newly created indices should be present in the list of fetched indices + for (int indexNumber = totalIndices + 1; indexNumber <= totalIndices + numIndicesToCreate; indexNumber++) { + assertFalse(indicesFetched.contains("test-index-" + indexNumber)); + } + } + + public void testRetrieveIndicesWithSizeOneAndCurrentIndexGetsDeletedAscOrder() { + // Query1 with 4 indices in clusterState (test-index1,2,3,4) + ClusterState clusterState = getRandomClusterState(List.of(1, 2, 3, 4)); + PageParams pageParams = new 
PageParams(null, PARAM_ASC_SORT_VALUE, 1); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-1", paginationStrategy.getRequestedEntities().get(0)); + + // Adding index5 to clusterState, before executing next query. + clusterState = addIndexToClusterState(clusterState, 5); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-2", paginationStrategy.getRequestedEntities().get(0)); + + // Deleting test-index-2 which has already been displayed, still test-index-3 should get displayed + clusterState = deleteIndexFromClusterState(clusterState, 2); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-3", paginationStrategy.getRequestedEntities().get(0)); + + // Deleting test-index-4 which is not yet displayed which otherwise should have been displayed in the following query + // instead test-index-5 should now get displayed. + clusterState = deleteIndexFromClusterState(clusterState, 4); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, false); + assertEquals("test-index-5", paginationStrategy.getRequestedEntities().get(0)); + + } + + public void testRetrieveIndicesWithSizeOneAndCurrentIndexGetsDeletedDescOrder() { + // Query1 with 4 indices in clusterState (test-index1,2,3,4). + ClusterState clusterState = getRandomClusterState(List.of(1, 2, 3, 4)); + PageParams pageParams = new PageParams(null, PARAM_DESC_SORT_VALUE, 1); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-4", paginationStrategy.getRequestedEntities().get(0)); + + // adding test-index-5 to clusterState, before executing next query. + clusterState = addIndexToClusterState(clusterState, 5); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_DESC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-3", paginationStrategy.getRequestedEntities().get(0)); + + // Deleting test-index-3 which has already been displayed, still index2 should get displayed. + clusterState = deleteIndexFromClusterState(clusterState, 3); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_DESC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 1, true); + assertEquals("test-index-2", paginationStrategy.getRequestedEntities().get(0)); + + // Deleting test-index-1 which is not yet displayed which otherwise should have been displayed in the following query. 
+ clusterState = deleteIndexFromClusterState(clusterState, 1); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_DESC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertPaginationResult(paginationStrategy, 0, false); + } + + public void testRetrieveIndicesWithMultipleDeletionsAtOnceAscOrder() { + // Query1 with 5 indices in clusterState (test-index1,2,3,4,5). + ClusterState clusterState = getRandomClusterState(List.of(1, 2, 3, 4, 5)); + PageParams pageParams = new PageParams(null, PARAM_ASC_SORT_VALUE, 1); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(1, paginationStrategy.getRequestedEntities().size()); + assertEquals("test-index-1", paginationStrategy.getRequestedEntities().get(0)); + assertNotNull(paginationStrategy.getResponseToken().getNextToken()); + + // executing next query without any changes to clusterState + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(1, paginationStrategy.getRequestedEntities().size()); + assertEquals("test-index-2", paginationStrategy.getRequestedEntities().get(0)); + assertNotNull(paginationStrategy.getResponseToken().getNextToken()); + + // Deleting test-index-1, test-index-2 & test-index-3 and executing next query. test-index-4 should get displayed. + clusterState = deleteIndexFromClusterState(clusterState, 1); + clusterState = deleteIndexFromClusterState(clusterState, 2); + clusterState = deleteIndexFromClusterState(clusterState, 3); + pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(1, paginationStrategy.getRequestedEntities().size()); + assertEquals("test-index-4", paginationStrategy.getRequestedEntities().get(0)); + assertNotNull(paginationStrategy.getResponseToken().getNextToken()); + + // Executing the last query without any further change. Should result in test-index-5 and nextToken as null. 
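+        // For reference only: testRetrieveIndicesWithTokenModifiedToQueryBeyondTotal further down builds a
+        // next token by hand. The shape that test assumes is "<creation-date-millis>|<last-returned-index-name>"
+        // run through Base64 (a sketch of what the test itself encodes, not a claim about the production encoder):
+        //   long creationDate = clusterState.metadata().indices().get("test-index-10").getCreationDate();
+        //   String raw = creationDate + "|" + "test-index-10";
+        //   String token = Base64.getEncoder().encodeToString(raw.getBytes(UTF_8));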
+ pageParams = new PageParams(paginationStrategy.getResponseToken().getNextToken(), PARAM_ASC_SORT_VALUE, 1); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(1, paginationStrategy.getRequestedEntities().size()); + assertEquals("test-index-5", paginationStrategy.getRequestedEntities().get(0)); + assertNull(paginationStrategy.getResponseToken().getNextToken()); + } + + public void testRetrieveIndicesWithTokenModifiedToQueryBeyondTotal() { + ClusterState clusterState = getRandomClusterState(List.of(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)); + PageParams pageParams = new PageParams(null, PARAM_ASC_SORT_VALUE, 10); + IndexPaginationStrategy paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(10, paginationStrategy.getRequestedEntities().size()); + assertNull(paginationStrategy.getResponseToken().getNextToken()); + // creating a token with last sent index as test-index-10 + String token = clusterState.metadata().indices().get("test-index-10").getCreationDate() + "|" + "test-index-10"; + pageParams = new PageParams(Base64.getEncoder().encodeToString(token.getBytes(UTF_8)), PARAM_ASC_SORT_VALUE, 10); + paginationStrategy = new IndexPaginationStrategy(pageParams, clusterState); + assertEquals(0, paginationStrategy.getRequestedEntities().size()); + assertNull(paginationStrategy.getResponseToken().getNextToken()); + } + + public void testCreatingIndexStrategyPageTokenWithRequestedTokenNull() { + try { + new IndexPaginationStrategy.IndexStrategyToken(null); + fail("expected exception"); + } catch (Exception e) { + assert e.getMessage().contains("requestedTokenString can not be null"); + } + } + + public void testIndexStrategyPageTokenWithWronglyEncryptedRequestToken() { + assertThrows(OpenSearchParseException.class, () -> new IndexPaginationStrategy.IndexStrategyToken("3%4%5")); + } + + public void testIndexStrategyPageTokenWithIncorrectNumberOfElementsInRequestedToken() { + assertThrows( + OpenSearchParseException.class, + () -> new IndexPaginationStrategy.IndexStrategyToken(PaginationStrategy.encryptStringToken("1725361543")) + ); + assertThrows( + OpenSearchParseException.class, + () -> new IndexPaginationStrategy.IndexStrategyToken(PaginationStrategy.encryptStringToken("1|1725361543|index|12345")) + ); + } + + public void testIndexStrategyPageTokenWithInvalidValuesInRequestedToken() { + assertThrows( + OpenSearchParseException.class, + () -> new IndexPaginationStrategy.IndexStrategyToken(PaginationStrategy.encryptStringToken("-1725361543|index")) + ); + } + + public void testCreatingIndexStrategyPageTokenWithNameOfLastRespondedIndexNull() { + try { + new IndexPaginationStrategy.IndexStrategyToken(1234l, null); + fail("expected exception"); + } catch (Exception e) { + assert e.getMessage().contains("index name should be provided"); + } + } + + /** + * @param indexNumbers would be used to create indices having names with integer appended after foo, like foo1, foo2. + * @return random clusterState consisting of indices having their creation times set to the integer used to name them. 
+ */ + private ClusterState getRandomClusterState(List indexNumbers) { + ClusterState clusterState = ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().build()) + .routingTable(RoutingTable.builder().build()) + .build(); + for (Integer indexNumber : indexNumbers) { + clusterState = addIndexToClusterState(clusterState, indexNumber); + } + return clusterState; + } + + private ClusterState addIndexToClusterState(ClusterState clusterState, int indexNumber) { + IndexMetadata indexMetadata = IndexMetadata.builder("test-index-" + indexNumber) + .settings( + settings(Version.CURRENT).put(SETTING_CREATION_DATE, Instant.now().plus(indexNumber, ChronoUnit.SECONDS).toEpochMilli()) + ) + .numberOfShards(between(1, 10)) + .numberOfReplicas(randomInt(20)) + .build(); + IndexRoutingTable.Builder indexRoutingTableBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex()); + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).build()) + .routingTable(RoutingTable.builder(clusterState.routingTable()).add(indexRoutingTableBuilder).build()) + .build(); + } + + private ClusterState deleteIndexFromClusterState(ClusterState clusterState, int indexNumber) { + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).remove("test-index-" + indexNumber)) + .routingTable(RoutingTable.builder(clusterState.routingTable()).remove("test-index-" + indexNumber).build()) + .build(); + } + + private void assertPaginationResult(IndexPaginationStrategy paginationStrategy, int expectedEntities, boolean tokenExpected) { + assertNotNull(paginationStrategy); + assertEquals(expectedEntities, paginationStrategy.getRequestedEntities().size()); + assertNotNull(paginationStrategy.getResponseToken()); + assertEquals(tokenExpected, Objects.nonNull(paginationStrategy.getResponseToken().getNextToken())); + } + +} diff --git a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java index 9697f4cee0d58..da8ccc9e121e0 100644 --- a/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/opensearch/search/builder/SearchSourceBuilderTests.java @@ -421,6 +421,27 @@ public void testDerivedFieldsParsingAndSerializationObjectType() throws IOExcept } } + public void testSearchPipelineParsingAndSerialization() throws IOException { + String restContent = "{ \"query\": { \"match_all\": {} }, \"from\": 0, \"size\": 10, \"search_pipeline\": \"my_pipeline\" }"; + String expectedContent = "{\"from\":0,\"size\":10,\"query\":{\"match_all\":{\"boost\":1.0}},\"search_pipeline\":\"my_pipeline\"}"; + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, restContent)) { + SearchSourceBuilder searchSourceBuilder = SearchSourceBuilder.fromXContent(parser); + searchSourceBuilder = rewrite(searchSourceBuilder); + + try (BytesStreamOutput output = new BytesStreamOutput()) { + searchSourceBuilder.writeTo(output); + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + SearchSourceBuilder deserializedBuilder = new SearchSourceBuilder(in); + String actualContent = deserializedBuilder.toString(); + assertEquals(expectedContent, actualContent); + assertEquals(searchSourceBuilder.hashCode(), deserializedBuilder.hashCode()); + assertNotSame(searchSourceBuilder, deserializedBuilder); + } + } + } + } + 
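+    // For reference only: a minimal sketch of the builder-side usage this test round-trips, using only calls
+    // that appear in the tests of this PR (SearchSourceBuilder#pipeline, SearchRequest#pipeline); the index
+    // name and pipeline id are placeholders:
+    //   SearchSourceBuilder source = SearchSourceBuilder.searchSource()
+    //       .from(0)
+    //       .size(10)
+    //       .pipeline("my_pipeline");          // rendered as "search_pipeline" in the request body
+    //   SearchRequest request = new SearchRequest("my-index").source(source);
+    //   request.pipeline(request.source().pipeline());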
public void testAggsParsing() throws IOException { { String restContent = "{\n" diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index f5857922fdff2..b52205996f34b 100644 --- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -969,6 +969,64 @@ public void testInlinePipeline() throws Exception { } } + public void testInlineDefinedPipeline() throws Exception { + SearchPipelineService searchPipelineService = createWithProcessors(); + + SearchPipelineMetadata metadata = new SearchPipelineMetadata( + Map.of( + "p1", + new PipelineConfiguration( + "p1", + new BytesArray( + "{" + + "\"request_processors\": [{ \"scale_request_size\": { \"scale\" : 2 } }]," + + "\"response_processors\": [{ \"fixed_score\": { \"score\" : 2 } }]" + + "}" + ), + MediaTypeRegistry.JSON + ) + ) + + ); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + ClusterState previousState = clusterState; + clusterState = ClusterState.builder(clusterState) + .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata)) + .build(); + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState)); + + SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource().size(100).pipeline("p1"); + SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); + searchRequest.pipeline(searchRequest.source().pipeline()); + + // Verify pipeline + PipelinedRequest pipelinedRequest = syncTransformRequest( + searchPipelineService.resolvePipeline(searchRequest, indexNameExpressionResolver) + ); + Pipeline pipeline = pipelinedRequest.getPipeline(); + assertEquals("p1", pipeline.getId()); + assertEquals(1, pipeline.getSearchRequestProcessors().size()); + assertEquals(1, pipeline.getSearchResponseProcessors().size()); + + // Verify that pipeline transforms request + assertEquals(200, pipelinedRequest.source().size()); + + int size = 10; + SearchHit[] hits = new SearchHit[size]; + for (int i = 0; i < size; i++) { + hits[i] = new SearchHit(i, "doc" + i, Collections.emptyMap(), Collections.emptyMap()); + hits[i].score(i); + } + SearchHits searchHits = new SearchHits(hits, new TotalHits(size * 2, TotalHits.Relation.EQUAL_TO), size); + SearchResponseSections searchResponseSections = new SearchResponseSections(searchHits, null, null, false, false, null, 0); + SearchResponse searchResponse = new SearchResponse(searchResponseSections, null, 1, 1, 0, 10, null, null); + + SearchResponse transformedResponse = syncTransformResponse(pipelinedRequest, searchResponse); + for (int i = 0; i < size; i++) { + assertEquals(2.0, transformedResponse.getHits().getHits()[i].getScore(), 0.0001); + } + } + public void testInfo() { SearchPipelineService searchPipelineService = createWithProcessors(); SearchPipelineInfo info = searchPipelineService.info(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 350c6f9ae8f6b..d17e661615b0d 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1923,11 +1923,6 @@ private final class TestClusterNode { protected 
PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { return new MockSinglePrioritizingExecutor(node.getName(), deterministicTaskQueue, threadPool); } - - @Override - protected void connectToNodesAndWait(ClusterState newClusterState) { - // don't do anything, and don't block - } } ); recoverySettings = new RecoverySettings(settings, clusterSettings); @@ -2094,7 +2089,7 @@ public void onFailure(final Exception e) { rerouteService, threadPool ); - nodeConnectionsService = new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService); + nodeConnectionsService = createTestNodeConnectionsService(clusterService.getSettings(), threadPool, transportService); final MetadataMappingService metadataMappingService = new MetadataMappingService(clusterService, indicesService); indicesClusterStateService = new IndicesClusterStateService( settings, @@ -2434,7 +2429,8 @@ public void onFailure(final Exception e) { nodeEnv, indicesService, namedXContentRegistry - ) + ), + new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE) ) ); actions.put( @@ -2492,6 +2488,24 @@ protected void assertSnapshotOrGenericThread() { } } + public NodeConnectionsService createTestNodeConnectionsService( + Settings settings, + ThreadPool threadPool, + TransportService transportService + ) { + return new NodeConnectionsService(settings, threadPool, transportService) { + @Override + public void connectToNodes(DiscoveryNodes discoveryNodes, Runnable onCompletion) { + // just update targetsByNode to ensure disconnect runs for these nodes + // we rely on disconnect to run for keeping track of pendingDisconnects and ensuring node-joins can happen + for (final DiscoveryNode discoveryNode : discoveryNodes) { + this.targetsByNode.put(discoveryNode, createConnectionTarget(discoveryNode)); + } + onCompletion.run(); + } + }; + } + public ClusterInfoService getMockClusterInfoService() { return clusterInfoService; } @@ -2563,10 +2577,11 @@ public void start(ClusterState initialState) { new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE), null ); + coordinator.setNodeConnectionsService(nodeConnectionsService); clusterManagerService.setClusterStatePublisher(coordinator); - coordinator.start(); clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService); nodeConnectionsService.start(); + coordinator.start(); clusterService.start(); indicesService.start(); indicesClusterStateService.start(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotsInProgressSerializationTests.java index 8fd1f44286094..d79cb62b6b7ac 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -214,7 +214,11 @@ public void testSerDeRemoteStoreIndexShallowCopy() throws IOException { assert (curr_entry.remoteStoreIndexShallowCopy() == false); } } - try (StreamInput in = out.bytes().streamInput()) { + + BytesStreamOutput out2 = new BytesStreamOutput(); + out2.setVersion(Version.V_2_9_0); + snapshotsInProgress.writeTo(out2); + try (StreamInput in = out2.bytes().streamInput()) { in.setVersion(Version.V_2_9_0); actualSnapshotsInProgress = new SnapshotsInProgress(in); assert in.available() == 0; diff --git a/server/src/test/java/org/opensearch/test/NoopDiscovery.java b/server/src/test/java/org/opensearch/test/NoopDiscovery.java index 42d3f1887ab4d..c35503a556db6 
diff --git a/server/src/test/java/org/opensearch/test/NoopDiscovery.java b/server/src/test/java/org/opensearch/test/NoopDiscovery.java
index 42d3f1887ab4d..c35503a556db6 100644
--- a/server/src/test/java/org/opensearch/test/NoopDiscovery.java
+++ b/server/src/test/java/org/opensearch/test/NoopDiscovery.java
@@ -32,6 +32,7 @@
 package org.opensearch.test;

 import org.opensearch.cluster.ClusterChangedEvent;
+import org.opensearch.cluster.NodeConnectionsService;
 import org.opensearch.common.lifecycle.Lifecycle;
 import org.opensearch.common.lifecycle.LifecycleListener;
 import org.opensearch.core.action.ActionListener;
@@ -55,6 +56,11 @@ public void startInitialJoin() {

     }

+    @Override
+    public void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
+
+    }
+
     @Override
     public Lifecycle.State lifecycleState() {
         return null;
diff --git a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java
index 1d734a56ef189..fdf762aa096f0 100644
--- a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java
+++ b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java
@@ -320,6 +320,50 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
         assertEquals(0, nodeDisconnectedCount.get());
     }

+    public void testConnectFailsWhenDisconnectIsPending() {
+        AtomicInteger nodeConnectedCount = new AtomicInteger();
+        AtomicInteger nodeDisconnectedCount = new AtomicInteger();
+        connectionManager.addListener(new TransportConnectionListener() {
+            @Override
+            public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) {
+                nodeConnectedCount.incrementAndGet();
+            }
+
+            @Override
+            public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {
+                nodeDisconnectedCount.incrementAndGet();
+            }
+        });
+
+        DiscoveryNode node = new DiscoveryNode("", new TransportAddress(InetAddress.getLoopbackAddress(), 0), Version.CURRENT);
+        ConnectionManager.ConnectionValidator validator = (c, p, l) -> l.onResponse(null);
+        Transport.Connection connection = new TestConnect(node);
+        doAnswer(invocationOnMock -> {
+            ActionListener listener = (ActionListener) invocationOnMock.getArguments()[2];
+            listener.onResponse(connection);
+            return null;
+        }).when(transport).openConnection(eq(node), eq(connectionProfile), any(ActionListener.class));
+        assertFalse(connectionManager.nodeConnected(node));
+
+        // Mark connection as pending disconnect, any connection attempt should fail
+        connectionManager.setPendingDisconnection(node);
+        PlainActionFuture fut = new PlainActionFuture<>();
+        connectionManager.connectToNode(node, connectionProfile, validator, fut);
+        expectThrows(IllegalStateException.class, () -> fut.actionGet());
+
+        // clear the pending disconnect and assert that connection succeeds
+        connectionManager.clearPendingDisconnections();
+        assertFalse(connectionManager.nodeConnected(node));
+        PlainActionFuture.get(
+            future -> connectionManager.connectToNode(node, connectionProfile, validator, ActionListener.map(future, x -> null))
+        );
+        assertFalse(connection.isClosed());
+        assertTrue(connectionManager.nodeConnected(node));
+        assertEquals(1, connectionManager.size());
+        assertEquals(1, nodeConnectedCount.get());
+        assertEquals(0, nodeDisconnectedCount.get());
+    }
+
     private static class TestConnect extends CloseableConnection {

         private final DiscoveryNode node;
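Stated compactly, the new test pins down the pending-disconnection contract of the connection manager: while a node still has a disconnect in flight, connect attempts must fail fast with IllegalStateException, and they are allowed again once the pending state is cleared. A condensed sketch of that call sequence, reusing the variable names from the test above rather than adding new test code:

connectionManager.setPendingDisconnection(node);            // node-left processing still in flight
PlainActionFuture fut = new PlainActionFuture<>();
connectionManager.connectToNode(node, connectionProfile, validator, fut);
expectThrows(IllegalStateException.class, fut::actionGet);  // connect is rejected while the disconnect is pending

connectionManager.clearPendingDisconnections();             // disconnect has completed
// a later connectToNode(...) for the same node is now expected to succeed, as asserted in the test above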
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle
index 20fd1058a181d..99b0386a48808 100644
--- a/test/fixtures/hdfs-fixture/build.gradle
+++ b/test/fixtures/hdfs-fixture/build.gradle
@@ -55,7 +55,7 @@ dependencies {
     exclude group: 'com.nimbusds'
     exclude module: "commons-configuration2"
   }
-  api "dnsjava:dnsjava:3.6.1"
+  api "dnsjava:dnsjava:3.6.2"
   api "org.codehaus.jettison:jettison:${versions.jettison}"
   api "org.apache.commons:commons-compress:${versions.commonscompress}"
   api "commons-codec:commons-codec:${versions.commonscodec}"
@@ -79,7 +79,7 @@ dependencies {
   api "org.jboss.xnio:xnio-nio:3.8.16.Final"
   api 'org.jline:jline:3.26.3'
   api 'org.apache.commons:commons-configuration2:2.11.0'
-  api 'com.nimbusds:nimbus-jose-jwt:9.40'
+  api 'com.nimbusds:nimbus-jose-jwt:9.41.1'
   api ('org.apache.kerby:kerb-admin:2.1.0') {
     exclude group: "org.jboss.xnio"
     exclude group: "org.jline"
diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
index 35ca5d80aeb4e..ded457601c0ae 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
@@ -125,7 +125,8 @@ List adjustNodesStats(List nodesStats) {
             nodeStats.getSegmentReplicationRejectionStats(),
             nodeStats.getRepositoriesStats(),
             nodeStats.getAdmissionControlStats(),
-            nodeStats.getNodeCacheStats()
+            nodeStats.getNodeCacheStats(),
+            nodeStats.getRemoteStoreNodeStats()
         );
     }).collect(Collectors.toList());
 }
diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
index b432e5411404e..3efcc538a1b25 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java
@@ -55,6 +55,7 @@
 import org.opensearch.cluster.metadata.Metadata;
 import org.opensearch.cluster.node.DiscoveryNode;
 import org.opensearch.cluster.node.DiscoveryNodeRole;
+import org.opensearch.cluster.node.DiscoveryNodes;
 import org.opensearch.cluster.routing.allocation.AllocationService;
 import org.opensearch.cluster.service.ClusterApplierService;
 import org.opensearch.cluster.service.ClusterService;
@@ -1150,9 +1151,12 @@ protected Optional getDisruptableMockTransport(Transpo
                 new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE)
             );
             clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService);
-            clusterService.setNodeConnectionsService(
-                new NodeConnectionsService(clusterService.getSettings(), threadPool, transportService)
+            NodeConnectionsService nodeConnectionsService = createTestNodeConnectionsService(
+                clusterService.getSettings(),
+                threadPool,
+                transportService
             );
+            clusterService.setNodeConnectionsService(nodeConnectionsService);
             repositoriesService = new RepositoriesService(
                 settings,
                 clusterService,
@@ -1187,6 +1191,7 @@ protected Optional getDisruptableMockTransport(Transpo
                 new ClusterManagerMetrics(NoopMetricsRegistry.INSTANCE),
                 null
             );
+            coordinator.setNodeConnectionsService(nodeConnectionsService);
             clusterManagerService.setClusterStatePublisher(coordinator);
             final GatewayService gatewayService = new GatewayService(
                 settings,
@@ -1588,6 +1593,24 @@ public void onNodeAck(DiscoveryNode node, Exception e) {
         }
     }

+    public static NodeConnectionsService createTestNodeConnectionsService(
+        Settings settings,
+        ThreadPool threadPool,
+        TransportService transportService
+    ) {
+        return new NodeConnectionsService(settings, threadPool, transportService) {
+            @Override
+            public void connectToNodes(DiscoveryNodes discoveryNodes, Runnable onCompletion) {
+                // just update targetsByNode to ensure disconnect runs for these nodes
+                // we rely on disconnect to run for keeping track of pendingDisconnects and ensuring node-joins can happen
+                for (final DiscoveryNode discoveryNode : discoveryNodes) {
+                    this.targetsByNode.put(discoveryNode, createConnectionTarget(discoveryNode));
+                }
+                onCompletion.run();
+            }
+        };
+    }
+
     static class DisruptableClusterApplierService extends ClusterApplierService {
         private final String nodeName;
         private final DeterministicTaskQueue deterministicTaskQueue;
@@ -1641,11 +1664,6 @@ public void onNewClusterState(String source, Supplier clusterState
             }
         }

-        @Override
-        protected void connectToNodesAndWait(ClusterState newClusterState) {
-            // don't do anything, and don't block
-        }
-
         @Override
         protected boolean applicationMayFail() {
             return this.applicationMayFail;
diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java
index 1ad18bf89d5ba..4f692c7bc8f62 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/DeterministicTaskQueue.java
@@ -92,6 +92,12 @@ public void runAllRunnableTasks() {
         }
     }

+    public void runAllRunnableTasksInEnqueuedOrder() {
+        while (hasRunnableTasks()) {
+            runTask(0);
+        }
+    }
+
     public void runAllTasks() {
         while (hasDeferredTasks() || hasRunnableTasks()) {
             if (hasDeferredTasks() && random.nextBoolean()) {
@@ -141,6 +147,11 @@ public void runRandomTask() {
         runTask(RandomNumbers.randomIntBetween(random, 0, runnableTasks.size() - 1));
     }

+    public void runNextTask() {
+        assert hasRunnableTasks();
+        runTask(0);
+    }
+
     private void runTask(final int index) {
         final Runnable task = runnableTasks.remove(index);
         logger.trace("running task {} of {}: {}", index, runnableTasks.size() + 1, task);
diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
index a8bb10fe20752..0bfa70a771f65 100644
--- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -665,6 +665,16 @@ public void onTimeout(TimeValue timeout) {
         }
     }

+    protected ActionFuture deleteSnapshotBlockedOnClusterManager(String repoName, String snapshotName) {
+        blockClusterManagerFromDeletingIndexNFile(repoName);
+        return deleteSnapshot(repoName, snapshotName);
+    }
+
+    protected ActionFuture deleteSnapshot(String repoName, String snapshotName) {
+        logger.info("--> Deleting snapshot [{}] to repo [{}]", snapshotName, repoName);
+        return clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).execute();
+    }
+
     protected ActionFuture startFullSnapshotBlockedOnDataNode(String snapshotName, String repoName, String dataNode)
         throws InterruptedException {
         blockDataNode(repoName, dataNode);
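A possible way the delete-snapshot helpers added above could be combined in an integration test. The repository and snapshot names are placeholders, and waitForBlock, unblockNode, getClusterManagerName and assertAcked are assumed to be the existing blocking and assertion utilities of this test framework rather than part of this change:

// Issue a snapshot deletion while the cluster manager is blocked on deleting index-N,
// then release the block and verify the deletion completes.
String clusterManagerNode = internalCluster().getClusterManagerName();          // assumed helper
ActionFuture<AcknowledgedResponse> deleteFuture = deleteSnapshotBlockedOnClusterManager("test-repo", "snap-1");
waitForBlock(clusterManagerNode, "test-repo", TimeValue.timeValueSeconds(30));  // assumed helper
unblockNode("test-repo", clusterManagerNode);                                   // assumed helper
assertAcked(deleteFuture.actionGet());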
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index 7adff82e72245..fa5fb736f518f 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -2752,6 +2752,7 @@ public void ensureEstimatedStats() {
                 false,
                 false,
                 false,
+                false,
                 false
             );
             assertThat(
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
index e474ef202b235..68a2b8086a92e 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java
@@ -403,7 +403,7 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase {

     private static Boolean segmentsPathFixedPrefix;

-    private static Boolean snapshotShardPathFixedPrefix;
+    protected static Boolean snapshotShardPathFixedPrefix;

     private Path remoteStoreRepositoryPath;
@@ -2904,7 +2904,7 @@ private static Settings buildRemoteStoreNodeAttributes(
         settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED.getKey(), randomBoolean());
         settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX.getKey(), translogPathFixedPrefix ? "a" : "");
         settings.put(RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX.getKey(), segmentsPathFixedPrefix ? "b" : "");
-        settings.put(BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING.getKey(), segmentsPathFixedPrefix ? "c" : "");
+        settings.put(BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING.getKey(), snapshotShardPathFixedPrefix ? "c" : "");
         return settings.build();
     }
diff --git a/test/framework/src/main/java/org/opensearch/test/TestLogsAppender.java b/test/framework/src/main/java/org/opensearch/test/TestLogsAppender.java
new file mode 100644
index 0000000000000..030f399a5bcc0
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/TestLogsAppender.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.apache.logging.log4j.core.LogEvent;
+import org.apache.logging.log4j.core.appender.AbstractAppender;
+import org.apache.logging.log4j.core.config.Property;
+import org.apache.logging.log4j.core.layout.PatternLayout;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Test logs appender that provides functionality to extract specific logs/exception messages and wait for it to show up
+ * @opensearch.internal
+ */
+public class TestLogsAppender extends AbstractAppender {
+    private final List<String> capturedLogs = new ArrayList<>();
+    private final List<String> messagesToCapture;
+
+    public TestLogsAppender(List<String> messagesToCapture) {
+        super("TestAppender", null, PatternLayout.createDefaultLayout(), false, Property.EMPTY_ARRAY);
+        this.messagesToCapture = messagesToCapture;
+        start();
+    }
+
+    @Override
+    public void append(LogEvent event) {
+        if (shouldCaptureMessage(event.getMessage().getFormattedMessage())) capturedLogs.add(event.getMessage().getFormattedMessage());
+        if (event.getThrown() != null) {
+            if (shouldCaptureMessage(event.getThrown().toString())) capturedLogs.add(event.getThrown().toString());
+            for (StackTraceElement element : event.getThrown().getStackTrace())
+                if (shouldCaptureMessage(element.toString())) capturedLogs.add(element.toString());
+        }
+    }
+
+    public boolean shouldCaptureMessage(String log) {
+        return messagesToCapture.stream().anyMatch(log::contains);
+    }
+
+    public List<String> getCapturedLogs() {
+        return new ArrayList<>(capturedLogs);
+    }
+
+    public boolean waitForLog(String expectedLog, long timeout, TimeUnit unit) {
+        long startTime = System.currentTimeMillis();
+        long timeoutInMillis = unit.toMillis(timeout);
+
+        while (System.currentTimeMillis() - startTime < timeoutInMillis) {
+            if (capturedLogs.stream().anyMatch(log -> log.contains(expectedLog))) {
+                return true;
+            }
+            try {
+                Thread.sleep(100); // Wait for 100ms before checking again
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+
+        return false;
+    }
+
+    // Clear captured logs
+    public void clearCapturedLogs() {
+        capturedLogs.clear();
+    }
+}
diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java
index 37df90fb103a3..d1e1a3e8af17c 100644
--- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java
+++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java
@@ -123,6 +123,16 @@ public void disconnectFromNode(DiscoveryNode node) {
         delegate.disconnectFromNode(node);
     }

+    @Override
+    public void setPendingDisconnection(DiscoveryNode node) {
+        delegate.setPendingDisconnection(node);
+    }
+
+    @Override
+    public void clearPendingDisconnections() {
+        delegate.clearPendingDisconnections();
+    }
+
     @Override
     public int size() {
         return delegate.size();
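TestLogsAppender, added above, is a plain Log4j appender, so a test has to attach it to the logger it wants to observe and detach it when done. One possible wiring, assuming the Loggers add/remove-appender utilities from org.opensearch.common.logging; the target logger and the awaited message are illustrative:

TestLogsAppender appender = new TestLogsAppender(List.of("pending disconnection"));   // messages to capture
Logger target = LogManager.getLogger(NodeConnectionsService.class);
Loggers.addAppender(target, appender);      // assumed test utility; the appender already called start() in its constructor
try {
    // ... run the scenario that should emit the log line ...
    assertTrue(appender.waitForLog("pending disconnection", 30, TimeUnit.SECONDS));
} finally {
    Loggers.removeAppender(target, appender);  // assumed test utility
    appender.clearCapturedLogs();
}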