diff --git a/CHANGELOG.md b/CHANGELOG.md
index d0be4a6454138..fc112cb97ada3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -40,6 +40,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Reader Writer Separation] Add searchOnly replica routing configuration ([#15410](https://github.com/opensearch-project/OpenSearch/pull/15410))
 - Add index creation using the context field ([#15290](https://github.com/opensearch-project/OpenSearch/pull/15290))
 - [Remote Publication] Add remote download stats ([#15291](https://github.com/opensearch-project/OpenSearch/pull/15291)))
+- Add support to upload snapshot shard blobs with hashed prefix ([#15426](https://github.com/opensearch-project/OpenSearch/pull/15426))
 - Add canRemain method to TargetPoolAllocationDecider to move shards from local to remote pool for hot to warm tiering ([#15010](https://github.com/opensearch-project/OpenSearch/pull/15010))
 - Add support for pluggable deciders for concurrent search ([#15363](https://github.com/opensearch-project/OpenSearch/pull/15363))
 - Add support for comma-separated list of index names to be used with Snapshot Status API ([#15409](https://github.com/opensearch-project/OpenSearch/pull/15409))[SnapshotV2] Snapshot Status API changes (#15409))
diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
index e073db7276119..1b85a1e227252 100644
--- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
+++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java
@@ -67,19 +67,11 @@ public void testUrlRepository() throws Exception {
         logger.info("--> creating repository");
         Path repositoryLocation = randomRepoPath();
-        assertAcked(
-            client.admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType(FsRepository.TYPE)
-                .setSettings(
-                    Settings.builder()
-                        .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
-                        .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
-                        .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
-                )
-        );
-
+        Settings.Builder settings = Settings.builder()
+            .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
+            .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+            .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
+        createRepository("test-repo", FsRepository.TYPE, settings);

         createIndex("test-idx");
         ensureGreen();
@@ -115,17 +107,10 @@ public void testUrlRepository() throws Exception {
         cluster().wipeIndices("test-idx");

         logger.info("--> create read-only URL repository");
-        assertAcked(
-            client.admin()
-                .cluster()
-                .preparePutRepository("url-repo")
-                .setType(URLRepository.TYPE)
-                .setSettings(
-                    Settings.builder()
-                        .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
-                        .put("list_directories", randomBoolean())
-                )
-        );
+        Settings.Builder settingsBuilder = Settings.builder()
+            .put(URLRepository.URL_SETTING.getKey(), repositoryLocation.toUri().toURL().toString())
+            .put("list_directories", randomBoolean());
+        createRepository("url-repo", URLRepository.TYPE, settingsBuilder);
         logger.info("--> restore index after deletion");
         RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
             .cluster()
diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
index 176e60a667aef..7f32f09602164 100644
--- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
+++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java
@@ -38,7 +38,6 @@
 import com.azure.storage.blob.models.BlobStorageException;
 import org.opensearch.action.ActionRunnable;
 import org.opensearch.action.support.PlainActionFuture;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.MockSecureSettings;
 import org.opensearch.common.settings.SecureSettings;
@@ -47,6 +46,7 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.junit.AfterClass;

 import java.net.HttpURLConnection;
@@ -56,7 +56,6 @@
 import reactor.core.scheduler.Schedulers;

 import static org.hamcrest.Matchers.blankOrNullString;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;

 public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -103,17 +102,11 @@ protected SecureSettings credentials() {

     @Override
     protected void createRepository(String repoName) {
-        AcknowledgedResponse putRepositoryResponse = client().admin()
-            .cluster()
-            .preparePutRepository(repoName)
-            .setType("azure")
-            .setSettings(
-                Settings.builder()
-                    .put("container", System.getProperty("test.azure.container"))
-                    .put("base_path", System.getProperty("test.azure.base"))
-            )
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        Settings.Builder settings = Settings.builder()
+            .put("container", System.getProperty("test.azure.container"))
+            .put("base_path", System.getProperty("test.azure.base"));
+
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "azure", settings);
         if (Strings.hasText(System.getProperty("test.azure.sas_token"))) {
             ensureSasTokenPermissions();
         }
diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
index 1e11b1d111d8f..860b30fdef9ca 100644
--- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
+++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java
@@ -32,19 +32,18 @@
 package org.opensearch.repositories.gcs;

-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.common.settings.MockSecureSettings;
 import org.opensearch.common.settings.SecureSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.core.common.Strings;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
+import org.opensearch.test.OpenSearchIntegTestCase;

 import java.util.Base64;
 import java.util.Collection;

 import static org.hamcrest.Matchers.blankOrNullString;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;

 public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -84,16 +83,9 @@ protected SecureSettings credentials() {

     @Override
     protected void createRepository(final String repoName) {
-        AcknowledgedResponse putRepositoryResponse = client().admin()
-            .cluster()
-            .preparePutRepository("test-repo")
-            .setType("gcs")
-            .setSettings(
-                Settings.builder()
-                    .put("bucket", System.getProperty("test.google.bucket"))
-                    .put("base_path", System.getProperty("test.google.base", "/"))
-            )
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        Settings.Builder settings = Settings.builder()
+            .put("bucket", System.getProperty("test.google.bucket"))
+            .put("base_path", System.getProperty("test.google.base", "/"));
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "gcs", settings);
     }
 }
diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java
index ab10691240649..60fdbea011a44 100644
--- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java
@@ -34,12 +34,12 @@
 import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;

 import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.common.settings.MockSecureSettings;
 import org.opensearch.common.settings.SecureSettings;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.plugins.Plugin;
 import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
+import org.opensearch.test.OpenSearchIntegTestCase;

 import java.util.Collection;

@@ -61,20 +61,13 @@ protected SecureSettings credentials() {

     @Override
     protected void createRepository(String repoName) {
-        AcknowledgedResponse putRepositoryResponse = client().admin()
-            .cluster()
-            .preparePutRepository(repoName)
-            .setType("hdfs")
-            .setSettings(
-                Settings.builder()
-                    .put("uri", "hdfs:///")
-                    .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
-                    .put("path", "foo")
-                    .put("chunk_size", randomIntBetween(100, 1000) + "k")
-                    .put("compress", randomBoolean())
-            )
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        Settings.Builder settings = Settings.builder()
+            .put("uri", "hdfs:///")
+            .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
+            .put("path", "foo")
+            .put("chunk_size", randomIntBetween(100, 1000) + "k")
+            .put("compress", randomBoolean());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, "hdfs", settings);
     }

     // HDFS repository doesn't have precise cleanup stats so we only check whether or not any blobs were removed
diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java
index ce456f26af3a4..130bbbf1d2198 100644
--- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java
+++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java
@@ -35,7 +35,6 @@
 import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.cluster.ClusterState;
 import org.opensearch.common.settings.Settings;
@@ -45,6 +44,7 @@
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.repositories.blobstore.BlobStoreTestUtil;
 import org.opensearch.snapshots.SnapshotState;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.threadpool.ThreadPool;

@@ -63,21 +63,13 @@ protected Collection<Class<? extends Plugin>> getPlugins() {

     public void testSimpleWorkflow() {
         Client client = client();
-
-        AcknowledgedResponse putRepositoryResponse = client.admin()
-            .cluster()
-            .preparePutRepository("test-repo")
-            .setType("hdfs")
-            .setSettings(
-                Settings.builder()
-                    .put("uri", "hdfs:///")
-                    .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
-                    .put("path", "foo")
-                    .put("chunk_size", randomIntBetween(100, 1000) + "k")
-                    .put("compress", randomBoolean())
-            )
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        Settings.Builder settings = Settings.builder()
+            .put("uri", "hdfs:///")
+            .put("conf.fs.AbstractFileSystem.hdfs.impl", TestingFs.class.getName())
+            .put("path", "foo")
+            .put("chunk_size", randomIntBetween(100, 1000) + "k")
+            .put("compress", randomBoolean());
+        OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "hdfs", settings);

         createIndex("test-idx-1");
         createIndex("test-idx-2");
@@ -168,7 +160,7 @@ public void testSimpleWorkflow() {
     public void testMissingUri() {
         try {
-            client().admin().cluster().preparePutRepository("test-repo").setType("hdfs").setSettings(Settings.EMPTY).get();
+            OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", Settings.builder());
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -178,12 +170,8 @@ public void testEmptyUri() {
         try {
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType("hdfs")
-                .setSettings(Settings.builder().put("uri", "/path").build())
-                .get();
+            Settings.Builder settings = Settings.builder().put("uri", "/path");
+            OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings);
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -193,12 +181,8 @@ public void testNonHdfsUri() {
         try {
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType("hdfs")
-                .setSettings(Settings.builder().put("uri", "file:///").build())
-                .get();
+            Settings.Builder settings = Settings.builder().put("uri", "file:///");
+            OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings);
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -208,12 +192,8 @@ public void testPathSpecifiedInHdfs() {
         try {
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType("hdfs")
-                .setSettings(Settings.builder().put("uri", "hdfs:///some/path").build())
-                .get();
+            Settings.Builder settings = Settings.builder().put("uri", "hdfs:///some/path");
+            OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings);
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
@@ -223,12 +203,8 @@ public void testMissingPath() {
         try {
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType("hdfs")
-                .setSettings(Settings.builder().put("uri", "hdfs:///").build())
-                .get();
+            Settings.Builder settings = Settings.builder().put("uri", "hdfs:///");
+            OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "hdfs", settings);
             fail();
         } catch (RepositoryException e) {
             assertTrue(e.getCause() instanceof IllegalArgumentException);
diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
index f7a84864a8569..7db9a0d3ba790 100644
--- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
+++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java
@@ -33,7 +33,6 @@
 import software.amazon.awssdk.services.s3.model.StorageClass;

-import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.common.SuppressForbidden;
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
@@ -43,6 +42,7 @@
 import org.opensearch.plugins.Plugin;
 import org.opensearch.repositories.AbstractThirdPartyRepositoryTestCase;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.junit.Before;

 import java.util.Collection;
@@ -51,7 +51,6 @@
 import java.util.concurrent.TimeUnit;

 import static org.hamcrest.Matchers.blankOrNullString;
-import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.not;

 public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase {
@@ -111,13 +110,7 @@ protected void createRepository(String repoName) {
                 settings.put("storage_class", storageClass);
             }
         }
-        AcknowledgedResponse putRepositoryResponse = client().admin()
-            .cluster()
-            .preparePutRepository("test-repo")
-            .setType("s3")
-            .setSettings(settings)
-            .get();
-        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo", "s3", settings);
     }

     @Override
diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
index 573a4f3f51a41..21017160d77e5 100644
--- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
+++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java
@@ -55,6 +55,7 @@
 import org.opensearch.rest.RestRequest;
 import org.opensearch.rest.RestResponse;
 import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction;
+import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 import org.opensearch.test.rest.FakeRestRequest;

@@ -68,7 +69,6 @@
 import static org.opensearch.repositories.s3.S3ClientSettings.ACCESS_KEY_SETTING;
 import static org.opensearch.repositories.s3.S3ClientSettings.SECRET_KEY_SETTING;
-import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
@@ -277,14 +277,8 @@ public void sendResponse(RestResponse response) {
     }

     private void createRepository(final String name, final Settings repositorySettings) {
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository(name)
-                .setType(S3Repository.TYPE)
-                .setVerify(false)
-                .setSettings(repositorySettings)
-        );
+        Settings.Builder settings = Settings.builder().put(repositorySettings);
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), name, S3Repository.TYPE, false, settings);
     }

     /**
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
index 36fe3748e9d10..6c0a156eb6752 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/repositories/RepositoryBlocksIT.java
@@ -55,13 +55,17 @@ public void testPutRepositoryWithBlocks() {
         logger.info("--> registering a repository is blocked when the cluster is read only");
         try {
             setClusterReadOnly(true);
+            Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
             assertBlocked(
-                client().admin()
-                    .cluster()
-                    .preparePutRepository("test-repo-blocks")
-                    .setType("fs")
-                    .setVerify(false)
-                    .setSettings(Settings.builder().put("location", randomRepoPath())),
+                OpenSearchIntegTestCase.putRepositoryRequestBuilder(
+                    client().admin().cluster(),
+                    "test-repo-blocks",
+                    "fs",
+                    false,
+                    settings,
+                    null,
+                    false
+                ),
                 Metadata.CLUSTER_READ_ONLY_BLOCK
             );
         } finally {
@@ -69,25 +73,13 @@ public void testPutRepositoryWithBlocks() {
         }

         logger.info("--> registering a repository is allowed when the cluster is not read only");
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo-blocks")
-                .setType("fs")
-                .setVerify(false)
-                .setSettings(Settings.builder().put("location", randomRepoPath()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings);
     }

     public void testVerifyRepositoryWithBlocks() {
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo-blocks")
-                .setType("fs")
-                .setVerify(false)
-                .setSettings(Settings.builder().put("location", randomRepoPath()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings);

         // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
         try {
@@ -104,14 +96,8 @@ public void testVerifyRepositoryWithBlocks() {
     }

     public void testDeleteRepositoryWithBlocks() {
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo-blocks")
-                .setType("fs")
-                .setVerify(false)
-                .setSettings(Settings.builder().put("location", randomRepoPath()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings);

         logger.info("--> deleting a repository is blocked when the cluster is read only");
         try {
@@ -126,14 +112,8 @@ public void testDeleteRepositoryWithBlocks() {
     }

     public void testGetRepositoryWithBlocks() {
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo-blocks")
-                .setType("fs")
-                .setVerify(false)
-                .setSettings(Settings.builder().put("location", randomRepoPath()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), "test-repo-blocks", "fs", false, settings);

         // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
         try {
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
index 78fb01b07b6b1..0f29f02b284a4 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java
@@ -80,13 +80,8 @@ protected void setUpRepository() throws Exception {

         logger.info("--> register a repository");
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository(REPOSITORY_NAME)
-                .setType("fs")
-                .setSettings(Settings.builder().put("location", randomRepoPath()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath());
+        OpenSearchIntegTestCase.putRepository(client().admin().cluster(), REPOSITORY_NAME, "fs", settings);

         logger.info("--> verify the repository");
         VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get();
diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
index 009f5111078de..abce2fc878f27 100644
--- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/RemoteCloneIndexIT.java
@@ -224,7 +224,7 @@ protected void setLowPriorityUploadRate(String repoName, String value) throws Ex
         Settings.Builder settings = Settings.builder()
             .put("location", rmd.settings().get("location"))
             .put("max_remote_low_priority_upload_bytes_per_sec", value);
-        assertAcked(client().admin().cluster().preparePutRepository(repoName).setType(rmd.type()).setSettings(settings).get());
+        createRepository(repoName, rmd.type(), settings);
     }

     public void testCreateCloneIndexFailure() throws ExecutionException, InterruptedException {
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
index 2d268a26a5755..ea45173cdbf7f 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java
@@ -365,13 +365,8 @@ public void testRestoreSnapshotAllocationDoesNotExceedWatermark() throws Excepti
         final String dataNodeName = internalCluster().startDataOnlyNode();
         ensureStableCluster(3);

-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("repo")
-                .setType(FsRepository.TYPE)
-                .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean()))
-        );
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean());
+        createRepository("repo", FsRepository.TYPE, settings);

         final InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) internalCluster()
             .getCurrentClusterManagerNodeInstance(ClusterInfoService.class);
diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
index 5eef7074e1dd6..3718dce538053 100644
--- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java
@@ -494,8 +494,7 @@ public void testRestoreSnapshotOverLimit() {
         repoSettings.put("location", randomRepoPath());
         repoSettings.put("compress", randomBoolean());
         repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
-
-        assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(repoSettings.build()));
+        createRepository("test-repo", "fs", repoSettings);

         int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size();
         ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes);
diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
index f46f413f4d23f..3ee506f58a9d7 100644
--- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java
@@ -577,18 +577,12 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I
         // the other problem here why we can't corrupt segments.X files is that the snapshot flushes again before
         // it snapshots and that will write a new segments.X+1 file
         logger.info("--> creating repository");
-        assertAcked(
-            client().admin()
-                .cluster()
-                .preparePutRepository("test-repo")
-                .setType("fs")
-                .setSettings(
-                    Settings.builder()
-                        .put("location", randomRepoPath().toAbsolutePath())
-                        .put("compress", randomBoolean())
-                        .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
-                )
-        );
+        Settings.Builder settings = Settings.builder()
+            .put("location", randomRepoPath().toAbsolutePath())
+            .put("compress", randomBoolean())
+            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
+        createRepository("test-repo", "fs", settings);
+
         logger.info("--> snapshot");
         final CreateSnapshotResponse createSnapshotResponse = client().admin()
.cluster() @@ -761,18 +755,11 @@ public void testPrimaryCorruptionDuringReplicationDoesNotFailReplicaShard() thro // Create a snapshot repository. This repo is used to take a snapshot after // corrupting a file, which causes the node to notice the corrupt data and // close the shard. - assertAcked( - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath().toAbsolutePath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath().toAbsolutePath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository("test-repo", "fs", settings); client().prepareIndex("test").setSource("field", "value").execute(); indexingInFlight.await(); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 06d2d2a90de87..0d3c8307c060f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -51,7 +51,6 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; @@ -271,14 +270,8 @@ public void testSpecifiedIndexUnavailableSnapshotRestore() throws Exception { createIndex("test1"); ensureGreen("test1"); waitForRelocation(); + createRepository("dummy-repo", "fs", Settings.builder().put("location", randomRepoPath())); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); verify(snapshot("snap2", "test1", "test2"), true); @@ -391,13 +384,8 @@ public void testWildcardBehaviourSnapshotRestore() throws Exception { ensureGreen("foobar"); waitForRelocation(); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("dummy-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + createRepository("dummy-repo", "fs", Settings.builder().put("location", randomRepoPath())); + client().admin().cluster().prepareCreateSnapshot("dummy-repo", "snap1").setWaitForCompletion(true).get(); IndicesOptions options = IndicesOptions.fromOptions(false, false, true, false); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index cf93a432d0371..68b29851c6c04 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -741,14 +741,7 @@ public void testSnapshotRecovery() throws Exception { String nodeA = internalCluster().startNode(); logger.info("--> create repository"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPO_NAME) - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", false)) - .get() - ); + createRepository(REPO_NAME, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", false)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java index e4e681a5433b5..17a9c3ddbe317 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/MigrationBaseTestCase.java @@ -108,9 +108,7 @@ protected void setFailRate(String repoName, int value) throws ExecutionException Settings.Builder settings = Settings.builder() .put("location", rmd.settings().get("location")) .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); - assertAcked( - client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() - ); + createRepository(repoName, ReloadableFsRepository.TYPE, settings); } public void initDocRepToRemoteMigration() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java index 377bd9529ca7a..c701a8d92c336 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationSettingsUpdateIT.java @@ -21,7 +21,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.MIXED; import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; import static org.opensearch.node.remotestore.RemoteStoreNodeService.Direction.REMOTE_STORE; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreMigrationSettingsUpdateIT extends RemoteStoreMigrationShardAllocationBaseTestCase { @@ -92,11 +91,7 @@ public void testNewRestoredIndexIsRemoteStoreBackedForRemoteStoreDirectionAndMix String snapshotName = "test-snapshot"; String snapshotRepoName = "test-restore-snapshot-repo"; Path snapshotRepoNameAbsolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(snapshotRepoName) - .setType("fs") - .setSettings(Settings.builder().put("location", snapshotRepoNameAbsolutePath)) - ); + createRepository(snapshotRepoName, "fs", Settings.builder().put("location", snapshotRepoNameAbsolutePath)); logger.info("Create snapshot of non remote stored backed index"); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java index 4d37b2a1feb88..7d29e5d328492 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotemigration/RemoteStoreMigrationTestCase.java @@ -116,15 +116,10 @@ public void testNoShallowSnapshotInMixedMode() throws Exception { logger.info("Create shallow snapshot setting enabled repo"); String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; Path shallowSnapshotRepoPath = randomRepoPath(); - assertAcked( - clusterAdmin().preparePutRepository(shallowSnapshotRepoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", shallowSnapshotRepoPath) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", shallowSnapshotRepoPath) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + createRepository(shallowSnapshotRepoName, "fs", settings); logger.info("Verify shallow snapshot creation"); final String snapshot1 = "snapshot1"; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 6994b731d123c..56078a6ef8800 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -803,20 +803,14 @@ public void testCreateSnapshotV2() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -867,20 +861,14 @@ public void testMixedSnapshotCreationWithV2RepositorySetting() throws Exception Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) - ) - ); + Settings.Builder settings = 
Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); createIndex(indexName1, indexSettings); @@ -901,20 +889,14 @@ public void testMixedSnapshotCreationWithV2RepositorySetting() throws Exception assertThat(snapshotInfo.getPinnedTimestamp(), equalTo(0L)); // enable shallow_snapshot_v2 - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); + indexDocuments(client, indexName1, 10); indexDocuments(client, indexName2, 20); @@ -948,20 +930,13 @@ public void testConcurrentSnapshotV2CreateOperation() throws InterruptedExceptio Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1031,20 +1006,13 @@ public void testCreateSnapshotV2WithRedIndex() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - 
.put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1086,20 +1054,13 @@ public void testCreateSnapshotV2WithIndexingLoad() throws Exception { Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1161,20 +1122,13 @@ public void testCreateSnapshotV2WithShallowCopySettingDisabled() throws Exceptio Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1216,20 +1170,13 @@ public void testClusterManagerFailoverDuringSnapshotCreation() 
throws Exception Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - ); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1295,21 +1242,14 @@ public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { String snapshotName1 = "test-create-snapshot-v1"; Path absolutePath1 = randomRepoPath().toAbsolutePath(); logger.info("Snapshot Path [{}]", absolutePath1); + Settings.Builder settings = Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false); + createRepository(snapshotRepoName, FsRepository.TYPE, settings); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), false) - ) - ); Client client = client(); Settings indexSettings = getIndexSettings(20, 0).build(); @@ -1346,19 +1286,16 @@ public void testConcurrentV1SnapshotAndV2RepoSettingUpdate() throws Exception { assertThrows( IllegalStateException.class, - () -> client().admin() - .cluster() - .preparePutRepository(snapshotRepoName) - .setType(FsRepository.TYPE) - .setSettings( - Settings.builder() - .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) - .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) - .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) - ) - .get() + () -> createRepository( + snapshotRepoName, + FsRepository.TYPE, + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), absolutePath1) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + 
.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .put(BlobStoreRepository.SHALLOW_SNAPSHOT_V2.getKey(), true) + ) ); } catch (Exception e) { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index f83ae3e0ca820..ba06bb463e5a8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -190,9 +190,7 @@ protected void setFailRate(String repoName, int value) throws ExecutionException Settings.Builder settings = Settings.builder() .put("location", rmd.settings().get("location")) .put(REPOSITORIES_FAILRATE_SETTING.getKey(), value); - assertAcked( - client().admin().cluster().preparePutRepository(repoName).setType(ReloadableFsRepository.TYPE).setSettings(settings).get() - ); + createRepository(repoName, ReloadableFsRepository.TYPE, settings); } public Settings indexSettings() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java index 11260e0914dc5..6ec973090883b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreClusterStateRestoreIT.java @@ -36,6 +36,8 @@ import org.opensearch.gateway.remote.ClusterMetadataManifest; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.RemoteClusterStateService; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.Before; @@ -476,14 +478,7 @@ public void testFullClusterRestoreGlobalMetadata() throws Exception { private Path registerCustomRepository() { Path path = randomRepoPath(); - assertAcked( - client().admin() - .cluster() - .preparePutRepository("custom-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", path).put("compress", false)) - .get() - ); + createRepository("custom-repo", "fs", Settings.builder().put("location", path).put("compress", false)); return path; } @@ -493,9 +488,15 @@ private void verifyRestoredRepositories(Path repoPath) { assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_NAME).settings())); assertTrue(SYSTEM_REPOSITORY_SETTING.get(repositoriesMetadata.repository(REPOSITORY_2_NAME).settings())); assertEquals("fs", repositoriesMetadata.repository("custom-repo").type()); + Settings settings = repositoriesMetadata.repository("custom-repo").settings(); + PathType pathType = BlobStoreRepository.SHARD_PATH_TYPE.get(settings); assertEquals( - Settings.builder().put("location", repoPath).put("compress", false).build(), - repositoriesMetadata.repository("custom-repo").settings() + Settings.builder() + .put("location", repoPath) + .put("compress", false) + .put(BlobStoreRepository.SHARD_PATH_TYPE.getKey(), pathType) + .build(), + settings ); // repo cleanup post verification diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 08ce35f0911ac..9a8d3651160c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -511,9 +511,7 @@ public void testRestoreSnapshotToIndexWithSameNameDifferentUUID() throws Excepti List dataNodes = internalCluster().startDataOnlyNodes(2); Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository("test-repo").setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); + createRepository("test-repo", "fs", Settings.builder().put("location", absolutePath)); logger.info("--> Create index and ingest 50 docs"); createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java index b0827dcfe4892..4cbafde6417af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -131,13 +131,15 @@ public void testMultiNodeClusterRandomNodeRecoverNetworkIsolationPostNonRestrict .get(0); Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); updatedSettings.remove("system_repository"); - - client.admin() - .cluster() - .preparePutRepository(repositoryMetadata.name()) - .setType(repositoryMetadata.type()) - .setSettings(updatedSettings) - .get(); + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + repositoryMetadata.name(), + repositoryMetadata.type(), + true, + updatedSettings, + null, + false + ).get(); ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); networkDisruption.stopDisrupting(); @@ -161,12 +163,7 @@ public void testNodeRestartPostNonRestrictedSettingsUpdate() throws Exception { Settings.Builder updatedSettings = Settings.builder().put(repositoryMetadata.settings()).put("chunk_size", new ByteSizeValue(20)); updatedSettings.remove("system_repository"); - client.admin() - .cluster() - .preparePutRepository(repositoryMetadata.name()) - .setType(repositoryMetadata.type()) - .setSettings(updatedSettings) - .get(); + createRepository(repositoryMetadata.name(), repositoryMetadata.type(), updatedSettings); internalCluster().restartRandomDataNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 195b2d18df7cc..8c7789846935d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -478,15 +478,7 @@ public void testRateLimitedRemoteDownloads() throws Exception { Settings.Builder settings = Settings.builder(); settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); settings.put("location", segmentRepoPath).put("max_remote_download_bytes_per_sec", 4, ByteSizeUnit.KB); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(ReloadableFsRepository.TYPE) - 
.setSettings(settings) - .get() - ); + createRepository(REPOSITORY_NAME, ReloadableFsRepository.TYPE, settings); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); @@ -515,14 +507,7 @@ public void testRateLimitedRemoteDownloads() throws Exception { // revert repo metadata to pass asserts on repo metadata vs. node attrs during teardown // https://github.com/opensearch-project/OpenSearch/pull/9569#discussion_r1345668700 settings.remove("max_remote_download_bytes_per_sec"); - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(ReloadableFsRepository.TYPE) - .setSettings(settings) - .get() - ); + createRepository(REPOSITORY_NAME, ReloadableFsRepository.TYPE, settings); for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) { Repository segmentRepo = repositoriesService.repository(REPOSITORY_NAME); assertNull(segmentRepo.getMetadata().settings().get("max_remote_download_bytes_per_sec")); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index 3dfde6f472525..99cc58848a04a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -31,7 +31,6 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -119,19 +118,12 @@ public void testRateLimitedRemoteUploads() throws Exception { internalCluster().startNode(clusterSettings.build()); Client client = client(); logger.info("--> updating repository"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType(MockFsRepositoryPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", repositoryLocation) - .put("compress", compress) - .put("max_remote_upload_bytes_per_sec", "1kb") - .put("chunk_size", 100, ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES); + createRepository(REPOSITORY_NAME, MockFsRepositoryPlugin.TYPE, settings); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java index b8415f4b41815..e4347fef744ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/RepositoriesServiceIT.java @@ -45,7 +45,6 @@ import java.util.Collection; import java.util.Collections; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -70,9 +69,12 @@ public void testUpdateRepository() { .next(); final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - - assertAcked( - client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + FsRepository.TYPE, + true, + repoSettings ); final GetRepositoriesResponse originalGetRepositoriesResponse = client.admin() @@ -91,8 +93,12 @@ public void testUpdateRepository() { final boolean updated = randomBoolean(); final String updatedRepositoryType = updated ? "mock" : FsRepository.TYPE; - assertAcked( - client.admin().cluster().preparePutRepository(repositoryName).setType(updatedRepositoryType).setSettings(repoSettings).get() + OpenSearchIntegTestCase.putRepositoryWithNoSettingOverrides( + client().admin().cluster(), + repositoryName, + updatedRepositoryType, + true, + repoSettings ); final GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() @@ -112,12 +118,8 @@ public void testUpdateRepository() { public void testSystemRepositoryCantBeCreated() { internalCluster(); final String repositoryName = "test-repo"; - final Client client = client(); final Settings.Builder repoSettings = Settings.builder().put("system_repository", true).put("location", randomRepoPath()); - assertThrows( - RepositoryException.class, - () -> client.admin().cluster().preparePutRepository(repositoryName).setType(FsRepository.TYPE).setSettings(repoSettings).get() - ); + assertThrows(RepositoryException.class, () -> createRepository(repositoryName, FsRepository.TYPE, repoSettings)); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java index 9057ef900efbd..34075b78e98af 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/fs/FsBlobStoreRepositoryIT.java @@ -76,19 +76,11 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce final Path repoPath = randomRepoPath(); logger.info("--> creating repository {} at {}", repoName, repoPath); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repoPath) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repoPath) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); final String indexName = randomName(); int docCount = iterations(10, 1000); @@ -112,14 +104,7 @@ public void testMissingDirectoriesNotCreatedInReadonlyRepository() throws IOExce IOUtils.rm(deletedPath); } assertFalse(Files.exists(deletedPath)); - - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings(Settings.builder().put("location", repoPath).put("readonly", true)) - ); + createRepository(repoName, "fs", Settings.builder().put("location", repoPath).put("readonly", true)); 
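+ // (Illustrative note, with assumed internals: createRepository(name, type, settings) is the OpenSearchIntegTestCase
+ // helper that replaces the removed inline chains above, roughly
+ //     assertAcked(clusterAdmin().preparePutRepository(name).setType(type).setSettings(settings));
+ // and, judging by the separate putRepositoryWithNoSettingOverrides variant used elsewhere in this diff, the helper
+ // is free to layer additional randomized repository settings, such as the new shard_path_type, on top of the ones
+ // passed in.)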
final OpenSearchException exception = expectThrows( OpenSearchException.class, diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java index fbf2acf7b08a6..9c784e5c80fea 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -72,6 +72,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.test.OpenSearchIntegTestCase.resolvePath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertFileExists; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; @@ -140,18 +141,11 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { assertAcked(client.admin().cluster().prepareDeleteRepository(repoName)); logger.info("--> recreate repository"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repo) - .put("compress", false) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repo) + .put("compress", false) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); startDeleteSnapshot(repoName, snapshot).get(); @@ -168,20 +162,12 @@ public void testConcurrentlyChangeRepositoryContentsInBwCMode() throws Exception Path repo = randomRepoPath(); final String repoName = "test-repo"; logger.info("--> creating repository at {}", repo.toAbsolutePath()); - assertAcked( - client.admin() - .cluster() - .preparePutRepository(repoName) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", repo) - .put("compress", false) - .put(BlobStoreRepository.ALLOW_CONCURRENT_MODIFICATION.getKey(), true) - .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES) - ) - ); - + Settings.Builder settings = Settings.builder() + .put("location", repo) + .put("compress", false) + .put(BlobStoreRepository.ALLOW_CONCURRENT_MODIFICATION.getKey(), true) + .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + createRepository(repoName, "fs", settings); createIndex("test-idx-1", "test-idx-2"); logger.info("--> indexing some data"); indexRandom( @@ -472,7 +458,7 @@ public void testHandleSnapshotErrorWithBwCFormat() throws IOException, Execution // not break subsequent repository operations logger.info("--> move shard level metadata to new generation"); final IndexId indexId = getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); + final Path shardPath = repoPath.resolve(resolvePath(indexId, "0")); final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); assertFileExists(initialShardMetaPath); Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "1")); @@ -501,8 +487,9 @@ public void testRepairBrokenShardGenerations() throws Exception { logger.info("--> move shard level metadata to new generation and make RepositoryData point at an older generation"); final IndexId indexId 
= getRepositoryData(repoName).resolveIndexId(indexName); - final Path shardPath = repoPath.resolve("indices").resolve(indexId.getId()).resolve("0"); + final Path shardPath = repoPath.resolve(resolvePath(indexId, "0")); final Path initialShardMetaPath = shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + "0"); + assertFileExists(initialShardMetaPath); Files.move(initialShardMetaPath, shardPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + randomIntBetween(1, 1000))); @@ -564,9 +551,7 @@ public void testSnapshotWithCorruptedShardIndexFile() throws Exception { assertThat(indexIds.size(), equalTo(1)); final IndexId corruptedIndex = indexIds.get(indexName); - final Path shardIndexFile = repo.resolve("indices") - .resolve(corruptedIndex.getId()) - .resolve("0") + final Path shardIndexFile = repo.resolve(resolvePath(corruptedIndex, "0")) .resolve("index-" + repositoryData.shardGenerations().getShardGen(corruptedIndex, 0)); logger.info("--> truncating shard index file [{}]", shardIndexFile); @@ -641,7 +626,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> delete index metadata and shard metadata"); for (String index : indices) { - Path shardZero = indicesPath.resolve(indexIds.get(index).getId()).resolve("0"); + Path shardZero = repo.resolve(resolvePath(indexIds.get(index), "0")); if (randomBoolean()) { Files.delete( shardZero.resolve("index-" + getRepositoryData("test-repo").shardGenerations().getShardGen(indexIds.get(index), 0)) @@ -834,10 +819,9 @@ public void testSnapshotWithMissingShardLevelIndexFile() throws Exception { clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get(); logger.info("--> deleting shard level index file"); - final Path indicesPath = repo.resolve("indices"); for (IndexId indexId : getRepositoryData("test-repo").getIndices().values()) { final Path shardGen; - try (Stream shardFiles = Files.list(indicesPath.resolve(indexId.getId()).resolve("0"))) { + try (Stream shardFiles = Files.list(repo.resolve(resolvePath(indexId, "0")))) { shardGen = shardFiles.filter(file -> file.getFileName().toString().startsWith(BlobStoreRepository.INDEX_FILE_PREFIX)) .findFirst() .orElseThrow(() -> new AssertionError("Failed to find shard index blob")); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2b88dbc64e2af..5b581eed3a944 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -88,6 +88,7 @@ import org.opensearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; import org.opensearch.test.TestCustomMetadata; @@ -761,18 +762,26 @@ public void testRegistrationFailure() { internalCluster().startNode(nonClusterManagerNode()); // Register mock repositories for (int i = 0; i < 5; i++) { - clusterAdmin().preparePutRepository("test-repo" + i) - .setType("mock") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .setVerify(false) - .get(); + 
OpenSearchIntegTestCase.putRepositoryRequestBuilder( + clusterAdmin(), + "test-repo" + i, + "mock", + false, + Settings.builder().put("location", randomRepoPath()), + null, + false + ).get(); } logger.info("--> make sure that properly setup repository can be registered on all nodes"); - clusterAdmin().preparePutRepository("test-repo-0") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get(); - + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + clusterAdmin(), + "test-repo-0", + "fs", + true, + Settings.builder().put("location", randomRepoPath()), + null, + false + ).get(); } public void testThatSensitiveRepositorySettingsAreNotExposed() throws Exception { @@ -981,11 +990,7 @@ public void testSnapshotWithDateMath() { final String snapshotName = "<snapshot-{now/d}>"; logger.info("--> creating repository"); - assertAcked( - clusterAdmin().preparePutRepository(repo) - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())) - ); + createRepository(repo, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())); final String expression1 = nameExpressionResolver.resolveDateMathExpression(snapshotName); logger.info("--> creating date math snapshot"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java index 1c46e37dea93a..c96d4a2f079ee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MultiClusterRepoAccessIT.java @@ -115,13 +115,15 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { secondCluster.startClusterManagerOnlyNode(); secondCluster.startDataOnlyNode(); - secondCluster.client() - .admin() - .cluster() - .preparePutRepository(repoNameOnSecondCluster) - .setType("fs") - .setSettings(Settings.builder().put("location", repoPath)) - .get(); + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + secondCluster.client().admin().cluster(), + repoNameOnSecondCluster, + "fs", + true, + Settings.builder().put("location", repoPath), + null, + false + ).get(); createIndexWithRandomDocs("test-idx-1", randomIntBetween(1, 100)); createFullSnapshot(repoNameOnFirstCluster, "snap-1"); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index dd40c77ba918d..271fcf166139f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -33,6 +33,7 @@ package org.opensearch.snapshots; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.bulk.BulkRequest; @@ -56,7 +57,6 @@ import java.nio.file.Path; import java.util.List; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import
static org.hamcrest.Matchers.equalTo; @@ -110,19 +110,17 @@ public void testRepositoryCreation() throws Exception { assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue()); assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue()); + RepositoryMetadata testRepo1Md = findRepository(repositoriesResponse.repositories(), "test-repo-1"); + logger.info("--> check that trying to create a repository with the same settings repeatedly does not update cluster state"); String beforeStateUuid = clusterStateResponse.getState().stateUUID(); - assertThat( - client.admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", location)) - .get() - .isAcknowledged(), - equalTo(true) - ); - assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + createRepository("test-repo-1", "fs", Settings.builder().put("location", location)); + repositoriesResponse = client.admin().cluster().prepareGetRepositories(randomFrom("_all", "*", "test-repo-*")).get(); + RepositoryMetadata testRepo1MdAfterUpdate = findRepository(repositoriesResponse.repositories(), "test-repo-1"); + + if (testRepo1Md.settings().equals(testRepo1MdAfterUpdate.settings())) { + assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID()); + } logger.info("--> delete repository test-repo-1"); client.admin().cluster().prepareDeleteRepository("test-repo-1").get(); @@ -225,12 +223,7 @@ public void testMisconfiguredRepository() throws Exception { Path invalidRepoPath = createTempDir().toAbsolutePath(); String location = invalidRepoPath.toString(); try { - client().admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", location)) - .get(); + createRepository("test-repo", "fs", Settings.builder().put("location", location)); fail("Shouldn't be here"); } catch (RepositoryException ex) { assertThat( @@ -242,33 +235,28 @@ public void testMisconfiguredRepository() throws Exception { public void testRepositoryAckTimeout() throws Exception { logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack"); - AcknowledgedResponse putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES) - ) - .setTimeout("0s") - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false)); + + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES); + PutRepositoryRequestBuilder requestBuilder = OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client().admin().cluster(), + "test-repo-1", + "fs", + true, + settings, + "0s", + false + ); + assertFalse(requestBuilder.get().isAcknowledged()); logger.info("--> creating repository test-repo-2 with standard timeout - should ack"); - putRepositoryResponse = client().admin() - .cluster() - .preparePutRepository("test-repo-2") - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES) - ) - .get(); - 
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + settings = Settings.builder() + .put("location", randomRepoPath()) + .put("compress", randomBoolean()) + .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES); + createRepository("test-repo-2", "fs", settings); logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack"); AcknowledgedResponse deleteRepositoryResponse = client().admin() @@ -292,25 +280,45 @@ public void testRepositoryVerification() throws Exception { Settings readonlySettings = Settings.builder().put(settings).put("readonly", true).build(); logger.info("--> creating repository that cannot write any files - should fail"); assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings), + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + "test-repo-1", + "mock", + true, + Settings.builder().put(settings), + null, + false + ), RepositoryVerificationException.class ); logger.info("--> creating read-only repository that cannot read any files - should fail"); assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings), + OpenSearchIntegTestCase.putRepositoryRequestBuilder( + client.admin().cluster(), + "test-repo-2", + "mock", + true, + Settings.builder().put(readonlySettings), + null, + false + ), RepositoryVerificationException.class ); logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); - assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo-1", "mock", false, Settings.builder().put(settings)); logger.info("--> verifying repository"); assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class); logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked"); - assertAcked( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings).setVerify(false) + OpenSearchIntegTestCase.putRepository( + client.admin().cluster(), + "test-repo-2", + "mock", + false, + Settings.builder().put(readonlySettings) ); logger.info("--> verifying repository"); @@ -320,12 +328,8 @@ public void testRepositoryVerification() throws Exception { logger.info("--> creating repository"); try { - client.admin() - .cluster() - .preparePutRepository("test-repo-1") - .setType("mock") - .setSettings(Settings.builder().put("location", location).put("localize_location", true)) - .get(); + Settings.Builder settingsBuilder = Settings.builder().put("location", location).put("localize_location", true); + createRepository("test-repo-1", "mock", settingsBuilder); fail("RepositoryVerificationException wasn't generated"); } catch (RepositoryVerificationException ex) { assertThat(ex.getMessage(), containsString("is not shared")); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index 0bebe969b3f3e..4187ecdf5f283 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -59,7 +59,6 @@ import java.util.Map; import java.util.function.Function; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.is; public class RepositoryFilterUserMetadataIT extends OpenSearchIntegTestCase { @@ -72,17 +71,10 @@ protected Collection> nodePlugins() { public void testFilteredRepoMetadataIsUsed() { final String clusterManagerName = internalCluster().getClusterManagerName(); final String repoName = "test-repo"; - assertAcked( - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType(MetadataFilteringPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put(MetadataFilteringPlugin.CLUSTER_MANAGER_SETTING_VALUE, clusterManagerName) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put(MetadataFilteringPlugin.CLUSTER_MANAGER_SETTING_VALUE, clusterManagerName); + createRepository(repoName, MetadataFilteringPlugin.TYPE, settings); createIndex("test-idx"); final SnapshotInfo snapshotInfo = client().admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java index 75c3a0a23de37..2d48882e43739 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -82,6 +82,7 @@ import org.opensearch.repositories.RepositoryException; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; import java.nio.channels.SeekableByteChannel; @@ -389,17 +390,11 @@ public void testSnapshotFileFailureDuringSnapshot() throws InterruptedException disableRepoConsistencyCheck("This test uses a purposely broken repository so it would fail consistency checks"); logger.info("--> creating repository"); - assertAcked( - clusterAdmin().preparePutRepository("test-repo") - .setType("mock") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("random", randomAlphaOfLength(10)) - .put("random_control_io_exception_rate", 0.2) - ) - .setVerify(false) - ); + Settings.Builder settings = Settings.builder() + .put("location", randomRepoPath()) + .put("random", randomAlphaOfLength(10)) + .put("random_control_io_exception_rate", 0.2); + OpenSearchIntegTestCase.putRepository(clusterAdmin(), "test-repo", "mock", false, settings); createIndexWithRandomDocs("test-idx", 100); @@ -690,11 +685,8 @@ private void unrestorableUseCase( assertAcked(client().admin().indices().prepareDelete(indexName)); // update the test repository - assertAcked( - clusterAdmin().preparePutRepository("test-repo") - .setType("mock") - .setSettings(Settings.builder().put("location", repositoryLocation).put(repositorySettings).build()) - ); + Settings.Builder settings = Settings.builder().put("location", repositoryLocation).put(repositorySettings); + OpenSearchIntegTestCase.putRepository(clusterAdmin(), "test-repo", "mock", settings); // attempt to restore the snapshot with the given settings RestoreSnapshotResponse restoreResponse = clusterAdmin().prepareRestoreSnapshot("test-repo", 
"test-snap") @@ -1015,27 +1007,17 @@ public void testDeleteRepositoryWhileSnapshotting() throws Exception { } logger.info("--> trying to move repository to another location"); + Settings.Builder settings = Settings.builder().put("location", repositoryLocation.resolve("test")); try { - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) - .get(); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "fs", settings); fail("shouldn't be able to replace in-use repository"); } catch (Exception ex) { logger.info("--> in-use repository replacement failed"); } logger.info("--> trying to create a repository with different name"); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo-2") - .setVerify(false) // do not do verification itself as snapshot threads could be fully blocked - .setType("fs") - .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))) - ); + Settings.Builder settingsBuilder = Settings.builder().put("location", repositoryLocation.resolve("test")); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo-2", "fs", false, settingsBuilder); logger.info("--> unblocking blocked node"); unblockNode("test-repo", blockedNode); @@ -1941,20 +1923,12 @@ public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception { logger.info("--> creating repository"); final Path repoPath = randomRepoPath(); final Client client = client(); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("mock") - .setVerify(false) - .setSettings( - Settings.builder() - .put("location", repoPath) - .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f) - // test that we can take a snapshot after a failed one, even if a partial index-N was written - .put("random", randomAlphaOfLength(10)) - ) - ); + Settings.Builder settings = Settings.builder() + .put("location", repoPath) + .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f) + // test that we can take a snapshot after a failed one, even if a partial index-N was written + .put("random", randomAlphaOfLength(10)); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "mock", false, settings); assertAcked( prepareCreate("test-idx").setSettings( @@ -2004,14 +1978,8 @@ public void testGetSnapshotsFromIndexBlobOnly() throws Exception { logger.info("--> creating repository"); final Path repoPath = randomRepoPath(); final Client client = client(); - assertAcked( - client.admin() - .cluster() - .preparePutRepository("test-repo") - .setType("fs") - .setVerify(false) - .setSettings(Settings.builder().put("location", repoPath)) - ); + Settings.Builder settings = Settings.builder().put("location", repoPath); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), "test-repo", "fs", false, settings); logger.info("--> creating random number of indices"); final int numIndices = randomIntBetween(1, 10); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 513c1fa578589..5a043e69e9735 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -54,6 +54,7 @@ import 
org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.rest.RestStatus; import org.opensearch.index.IndexNotFoundException; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.threadpool.ThreadPool; @@ -70,6 +71,7 @@ import java.util.stream.Collectors; import static org.opensearch.snapshots.SnapshotsService.MAX_SHARDS_ALLOWED_IN_STATUS_API; +import static org.opensearch.test.OpenSearchIntegTestCase.resolvePath; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -208,11 +210,9 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { final SnapshotInfo snapshotInfo = createFullSnapshot("test-repo", "test-snap"); logger.info("--> delete shard-level snap-${uuid}.dat file for one shard in this snapshot to simulate concurrent delete"); - final String indexRepoId = getRepositoryData("test-repo").resolveIndexId(snapshotInfo.indices().get(0)).getId(); + IndexId indexId = getRepositoryData("test-repo").resolveIndexId(snapshotInfo.indices().get(0)); IOUtils.rm( - repoPath.resolve("indices") - .resolve(indexRepoId) - .resolve("0") + repoPath.resolve(resolvePath(indexId, "0")) .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java index 28b84655a2cc7..bb5cc89d4e1d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SystemRepositoryIT.java @@ -8,7 +8,6 @@ package org.opensearch.snapshots; -import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.repositories.RepositoryException; @@ -19,7 +18,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class SystemRepositoryIT extends AbstractSnapshotIntegTestCase { @@ -43,13 +41,8 @@ public void testRestrictedSettingsCantBeUpdated() { disableRepoConsistencyCheck("System repository is being used for the test"); internalCluster().startNode(); - final Client client = client(); final Settings.Builder repoSettings = Settings.builder().put("location", randomRepoPath()); - - RepositoryException e = expectThrows( - RepositoryException.class, - () -> client.admin().cluster().preparePutRepository(systemRepoName).setType("mock").setSettings(repoSettings).get() - ); + RepositoryException e = expectThrows(RepositoryException.class, () -> createRepository(systemRepoName, "mock", repoSettings)); assertEquals( e.getMessage(), "[system-repo-name] trying to modify an unmodifiable attribute type of system " @@ -59,18 +52,8 @@ public void testRestrictedSettingsCantBeUpdated() { public void testSystemRepositoryNonRestrictedSettingsCanBeUpdated() { disableRepoConsistencyCheck("System repository is being used for the test"); - internalCluster().startNode(); - final Client client = client(); final Settings.Builder repoSettings = 
Settings.builder().put("location", absolutePath).put("chunk_size", new ByteSizeValue(20)); - - assertAcked( - client.admin() - .cluster() - .preparePutRepository(systemRepoName) - .setType(ReloadableFsRepository.TYPE) - .setSettings(repoSettings) - .get() - ); + createRepository(systemRepoName, ReloadableFsRepository.TYPE, repoSettings); } } diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index af9e5e265fab9..bddbe963e8013 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -224,6 +224,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID, IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID, + IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE, // Settings for remote translog IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index a1ee67f6dba37..8d8bf88bb82e4 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -50,6 +50,7 @@ import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.index.Index; import org.opensearch.index.compositeindex.datacube.startree.StarTreeIndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.translog.Translog; @@ -682,6 +683,14 @@ public static IndexMergePolicy fromString(String text) { Property.InternalIndex ); + public static final Setting SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE = new Setting<>( + "index.searchable_snapshot.shard_path_type", + PathType.FIXED.toString(), + PathType::parseString, + Property.IndexScope, + Property.InternalIndex + ); + public static final Setting DEFAULT_SEARCH_PIPELINE = new Setting<>( "index.search.default_pipeline", SearchPipelineService.NOOP_PIPELINE_ID, diff --git a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java index d3c6fc9d1f3bf..03d841d13b7f7 100644 --- a/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java +++ b/server/src/main/java/org/opensearch/index/recovery/RemoteStoreRestoreService.java @@ -227,7 +227,8 @@ private RemoteRestoreResult executeRestore( .build(); } - IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID()); + // This instance of IndexId is not related to Snapshot Restore. Hence, we are using the ctor without pathType. 
+ IndexId indexId = new IndexId(indexName, updatedIndexMetadata.getIndexUUID(), IndexId.DEFAULT_SHARD_PATH_TYPE); if (metadataFromRemoteStore == false) { Map<ShardId, IndexShardRoutingTable> indexShardRoutingTableMap = currentState.routingTable() diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java index 6118650f1924d..fb645e33b8fbd 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreEnums.java @@ -213,7 +213,7 @@ public enum PathHashAlgorithm { @Override String hash(BasePathInput pathInput) { StringBuilder input = new StringBuilder(); - for (String path : pathInput.fixedSubPath().toArray()) { + for (String path : pathInput.hashPath().toArray()) { input.append(path); } long hash = FNV1a.hash64(input.toString()); @@ -228,7 +228,7 @@ String hash(BasePathInput pathInput) { @Override String hash(BasePathInput pathInput) { StringBuilder input = new StringBuilder(); - for (String path : pathInput.fixedSubPath().toArray()) { + for (String path : pathInput.hashPath().toArray()) { input.append(path); } long hash = FNV1a.hash64(input.toString()); diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java index 05357aaf6ec72..843992004f23b 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStorePathStrategy.java @@ -17,6 +17,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.DataType; import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.util.Objects; @@ -111,6 +112,10 @@ BlobPath fixedSubPath() { return BlobPath.cleanPath().add(indexUUID); } + BlobPath hashPath() { + return fixedSubPath(); + } + /** * Returns a new builder for {@link BasePathInput}. */ @@ -138,7 +143,7 @@ public T basePath(BlobPath basePath) { return self(); } - public Builder indexUUID(String indexUUID) { + public T indexUUID(String indexUUID) { this.indexUUID = indexUUID; return self(); } @@ -153,6 +158,65 @@ public BasePathInput build() { } } + /** + * A subclass of {@link BasePathInput} that represents the input required to generate a path + * for a shard in a snapshot. It includes the base path, index UUID, and shard ID. + * + * @opensearch.internal + */ + public static class SnapshotShardPathInput extends BasePathInput { + private final String shardId; + + public SnapshotShardPathInput(SnapshotShardPathInput.Builder builder) { + super(builder); + this.shardId = Objects.requireNonNull(builder.shardId); + } + + @Override + BlobPath fixedSubPath() { + return BlobPath.cleanPath().add(BlobStoreRepository.INDICES_DIR).add(super.fixedSubPath()).add(shardId); + } + + @Override + BlobPath hashPath() { + return BlobPath.cleanPath().add(shardId).add(indexUUID()); + } + + public String shardId() { + return shardId; + } + + /** + * Returns a new builder for {@link SnapshotShardPathInput}. + */ + public static SnapshotShardPathInput.Builder builder() { + return new SnapshotShardPathInput.Builder(); + } + + /** + * Builder for {@link SnapshotShardPathInput}.
+ * + * @opensearch.internal + */ + public static class Builder extends BasePathInput.Builder<Builder> { + private String shardId; + + public SnapshotShardPathInput.Builder shardId(String shardId) { + this.shardId = shardId; + return this; + } + + @Override + protected SnapshotShardPathInput.Builder self() { + return this; + } + + public SnapshotShardPathInput build() { + return new SnapshotShardPathInput(this); + } + } + } + /** * Wrapper class for the data aware path input required to generate path for remote store uploads. This input is * composed of the parent inputs, shard id, data category and data type. diff --git a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java index 177f0526e7571..e027e8b7cb3b1 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/remote/directory/RemoteSnapshotDirectoryFactory.java @@ -11,14 +11,15 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.ShardPath; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; import org.opensearch.index.snapshots.blobstore.IndexShardSnapshot; import org.opensearch.index.store.remote.filecache.FileCache; import org.opensearch.index.store.remote.utils.TransferManager; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; @@ -74,10 +75,11 @@ private Future<RemoteSnapshotDirectory> createRemoteSnapshotDirectoryFromSnapshot( ShardPath localShardPath, BlobStoreRepository blobStoreRepository ) throws IOException { - final BlobPath blobPath = blobStoreRepository.basePath() - .add("indices") - .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) - .add(Integer.toString(localShardPath.getShardId().getId())); + // The information below, i.e. the snapshot-generated indexId, the shard_path_type, and the shardId, is used for + // creating the shard BlobContainer. It has been updated to account for hashed_prefix snapshots.
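+ // (Illustrative: for an index restored as a searchable snapshot out of a hashed_prefix snapshot, the
+ // SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE index setting would hold "HASHED_PREFIX" and SEARCHABLE_SNAPSHOT_INDEX_ID
+ // the snapshot-side IndexId.getId() value; the code below reads these back to locate the shard blobs.)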
+ String indexId = IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings()); + PathType pathType = IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE.get(indexSettings.getSettings()); + int shardId = localShardPath.getShardId().getId(); final SnapshotId snapshotId = new SnapshotId( IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.get(indexSettings.getSettings()), IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.get(indexSettings.getSettings()) @@ -89,7 +91,12 @@ private Future<RemoteSnapshotDirectory> createRemoteSnapshotDirectoryFromSnapshot( // this trick is needed to bypass assertions in BlobStoreRepository::assertAllowableThreadPools in case of node restart and a remote // index restore is invoked return threadPool.executor(ThreadPool.Names.SNAPSHOT).submit(() -> { - final BlobContainer blobContainer = blobStoreRepository.blobStore().blobContainer(blobPath); + // shardContainer(IndexId, shardId) method uses the id and pathType information to generate the blobPath and + // hence the blobContainer. We have used a dummy name as it has no bearing on the blobPath generation. + final BlobContainer blobContainer = blobStoreRepository.shardContainer( + new IndexId("DUMMY", indexId, pathType.getCode()), + shardId + ); final IndexShardSnapshot indexShardSnapshot = blobStoreRepository.loadShardSnapshot(blobContainer, snapshotId); assert indexShardSnapshot instanceof BlobStoreIndexShardSnapshot : "indexShardSnapshot should be an instance of BlobStoreIndexShardSnapshot"; diff --git a/server/src/main/java/org/opensearch/repositories/IndexId.java b/server/src/main/java/org/opensearch/repositories/IndexId.java index 87a0063e8c21b..238dffbb46bde 100644 --- a/server/src/main/java/org/opensearch/repositories/IndexId.java +++ b/server/src/main/java/org/opensearch/repositories/IndexId.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.opensearch.Version; import org.opensearch.common.annotation.PublicApi; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; @@ -40,6 +41,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.ToXContentObject; import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.index.remote.RemoteStoreEnums; import java.io.IOException; import java.util.Objects; @@ -51,23 +53,36 @@ */ @PublicApi(since = "1.0.0") public final class IndexId implements Writeable, ToXContentObject { - protected static final String NAME = "name"; - protected static final String ID = "id"; + static final String NAME = "name"; + static final String ID = "id"; + static final String SHARD_PATH_TYPE = "shard_path_type"; + public static final int DEFAULT_SHARD_PATH_TYPE = RemoteStoreEnums.PathType.FIXED.getCode(); private final String name; private final String id; + private final int shardPathType; private final int hashCode; + // Used for testing only public IndexId(final String name, final String id) { + this(name, id, DEFAULT_SHARD_PATH_TYPE); + } + + public IndexId(String name, String id, int shardPathType) { this.name = name; this.id = id; + this.shardPathType = shardPathType; this.hashCode = computeHashCode(); - } public IndexId(final StreamInput in) throws IOException { this.name = in.readString(); this.id = in.readString(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + this.shardPathType = in.readVInt(); + } else { + this.shardPathType = DEFAULT_SHARD_PATH_TYPE; + } this.hashCode = computeHashCode(); } @@ -93,9 +108,16 @@ public String getId() { return id; } + /** + * The storage path
type in the remote store for the index with this index id. + */ + public int getShardPathType() { + return shardPathType; + } + @Override public String toString() { - return "[" + name + "/" + id + "]"; + return "[" + name + "/" + id + "/" + shardPathType + "]"; } @Override @@ -107,7 +129,7 @@ public boolean equals(Object o) { return false; } IndexId that = (IndexId) o; - return Objects.equals(name, that.name) && Objects.equals(id, that.id); + return Objects.equals(name, that.name) && Objects.equals(id, that.id) && Objects.equals(this.shardPathType, that.shardPathType); } @Override @@ -116,13 +138,16 @@ public int hashCode() { } private int computeHashCode() { - return Objects.hash(name, id); + return Objects.hash(name, id, shardPathType); } @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); out.writeString(id); + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeVInt(shardPathType); + } } @Override @@ -130,6 +155,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par builder.startObject(); builder.field(NAME, name); builder.field(ID, id); + builder.field(SHARD_PATH_TYPE, shardPathType); builder.endObject(); return builder; } diff --git a/server/src/main/java/org/opensearch/repositories/RepositoryData.java b/server/src/main/java/org/opensearch/repositories/RepositoryData.java index ddccc633e5d75..1eeb1d838f2ca 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoryData.java @@ -519,7 +519,7 @@ public List<IndexId> resolveIndices(final List<String> indices) { * @param indicesToResolve names of indices to resolve * @param inFlightIds name to index mapping for currently in-flight snapshots not yet in the repository data to fall back to */ - public List<IndexId> resolveNewIndices(List<String> indicesToResolve, Map<String, IndexId> inFlightIds) { + public List<IndexId> resolveNewIndices(List<String> indicesToResolve, Map<String, IndexId> inFlightIds, int pathType) { List<IndexId> snapshotIndices = new ArrayList<>(); for (String index : indicesToResolve) { IndexId indexId = indices.get(index); @@ -527,13 +527,17 @@ public List<IndexId> resolveNewIndices(List<String> indicesToResolve, Map<Strin if (indexId == null) { indexId = inFlightIds.get(index); } if (indexId == null) { - indexId = new IndexId(index, UUIDs.randomBase64UUID()); + indexId = new IndexId(index, UUIDs.randomBase64UUID(), pathType); } snapshotIndices.add(indexId); } return Collections.unmodifiableList(snapshotIndices); } + public List<IndexId> resolveNewIndices(List<String> indicesToResolve, Map<String, IndexId> inFlightIds) { + return resolveNewIndices(indicesToResolve, inFlightIds, IndexId.DEFAULT_SHARD_PATH_TYPE); + } + private static final String SHARD_GENERATIONS = "shard_generations"; private static final String INDEX_METADATA_IDENTIFIERS = "index_metadata_identifiers"; private static final String INDEX_METADATA_LOOKUP = "index_metadata_lookup"; @@ -546,10 +550,16 @@ public List<IndexId> resolveNewIndices(List<String> indicesToResolve, Map<Strin final List<SnapshotId> snapshotIds = indexSnapshots.get(indexId); assert snapshotIds != null; @@ -791,14 +804,20 @@ private static void parseIndices( final List<SnapshotId> snapshotIds = new ArrayList<>(); final List<String> gens = new ArrayList<>(); + String id = null; + int pathType = IndexId.DEFAULT_SHARD_PATH_TYPE; IndexId indexId = null; + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { final String indexMetaFieldName = parser.currentName(); final XContentParser.Token currentToken = parser.nextToken(); switch (indexMetaFieldName) { case INDEX_ID: - indexId = new IndexId(indexName, parser.text()); + id = parser.text(); + break; + case IndexId.SHARD_PATH_TYPE: + pathType = parser.intValue(); break; case SNAPSHOTS: XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_ARRAY,
currentToken, parser); @@ -821,7 +840,7 @@ private static void parseIndices( // different versions create or delete snapshot in the same repository. throw new OpenSearchParseException( "Detected a corrupted repository, index " - + indexId + + new IndexId(indexName, id, pathType) + " references an unknown snapshot uuid [" + uuid + "]" @@ -838,9 +857,10 @@ private static void parseIndices( break; } } - assert indexId != null; + assert id != null; + indexId = new IndexId(indexName, id, pathType); indexSnapshots.put(indexId, Collections.unmodifiableList(snapshotIds)); - indexLookup.put(indexId.getId(), indexId); + indexLookup.put(id, indexId); for (int i = 0; i < gens.size(); i++) { String parsedGen = gens.get(i); if (fixBrokenShardGens) { diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 5d5b0e91d8a6a..48e5448de5d2d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -109,8 +109,11 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.remote.RemoteStorePathStrategy.BasePathInput; +import org.opensearch.index.remote.RemoteStorePathStrategy.SnapshotShardPathInput; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; @@ -146,6 +149,8 @@ import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotMissingException; +import org.opensearch.snapshots.SnapshotShardPaths; +import org.opensearch.snapshots.SnapshotShardPaths.ShardInfo; import org.opensearch.snapshots.SnapshotsService; import org.opensearch.threadpool.ThreadPool; @@ -157,6 +162,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -179,7 +186,6 @@ import java.util.stream.Stream; import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; -import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; import static org.opensearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; @@ -225,6 +231,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private static final String UPLOADED_DATA_BLOB_PREFIX = "__"; + public static final String INDICES_DIR = "indices"; + /** * Prefix used for the identifiers of data blobs that were not actually written to the repository physically because their contents are * already stored in the metadata referencing them, i.e. 
in {@link BlobStoreIndexShardSnapshot} and @@ -270,6 +278,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public static final Setting<Boolean> SHALLOW_SNAPSHOT_V2 = Setting.boolSetting("shallow_snapshot_v2", false); + public static final Setting<PathType> SHARD_PATH_TYPE = new Setting<>( + "shard_path_type", + PathType.FIXED.toString(), + PathType::parseString + ); + /** * Setting to set batch size of stale snapshot shard blobs that will be deleted by snapshot workers as part of snapshot deletion. * For optimal performance the value of the setting should be equal to or close to repository's max # of keys that can be deleted in single operation @@ -383,6 +397,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp BlobStoreIndexShardSnapshots::fromXContent ); + public static final ConfigBlobStoreFormat<SnapshotShardPaths> SNAPSHOT_SHARD_PATHS_FORMAT = new ConfigBlobStoreFormat<>( + SnapshotShardPaths.FILE_NAME_FORMAT + ); + private volatile boolean readOnly; private final boolean isSystemRepository; @@ -393,6 +411,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final SetOnce<BlobContainer> blobContainer = new SetOnce<>(); + private final SetOnce<BlobContainer> rootBlobContainer = new SetOnce<>(); + + private final SetOnce<BlobContainer> snapshotShardPathBlobContainer = new SetOnce<>(); + private final SetOnce<BlobStore> blobStore = new SetOnce<>(); protected final ClusterService clusterService; @@ -787,6 +809,16 @@ BlobContainer getBlobContainer() { return blobContainer.get(); } + // package private, only used for testing + BlobContainer getRootBlobContainer() { + return rootBlobContainer.get(); + } + + // public, only used for testing + public SetOnce<BlobContainer> getSnapshotShardPathBlobContainer() { + return snapshotShardPathBlobContainer; + } + // for test purposes only protected BlobStore getBlobStore() { return blobStore.get(); } @@ -812,10 +844,47 @@ protected BlobContainer blobContainer() { } } } - return blobContainer; } + /** + * Maintains single lazy instance of {@link BlobContainer} + */ + protected BlobContainer rootBlobContainer() { + assertSnapshotOrGenericThread(); + + BlobContainer rootBlobContainer = this.rootBlobContainer.get(); + if (rootBlobContainer == null) { + synchronized (lock) { + rootBlobContainer = this.rootBlobContainer.get(); + if (rootBlobContainer == null) { + rootBlobContainer = blobStore().blobContainer(BlobPath.cleanPath()); + this.rootBlobContainer.set(rootBlobContainer); + } + } + } + return rootBlobContainer; + } + + /** + * Maintains single lazy instance of {@link BlobContainer} + */ + protected BlobContainer snapshotShardPathBlobContainer() { + assertSnapshotOrGenericThread(); + + BlobContainer snapshotShardPathBlobContainer = this.snapshotShardPathBlobContainer.get(); + if (snapshotShardPathBlobContainer == null) { + synchronized (lock) { + snapshotShardPathBlobContainer = this.snapshotShardPathBlobContainer.get(); + if (snapshotShardPathBlobContainer == null) { + snapshotShardPathBlobContainer = blobStore().blobContainer(basePath().add(SnapshotShardPaths.DIR)); + this.snapshotShardPathBlobContainer.set(snapshotShardPathBlobContainer); + } + } + } + return snapshotShardPathBlobContainer; + } + /** * Maintains single lazy instance of {@link BlobStore}. * Public for testing. @@ -1046,6 +1115,18 @@ private void doDeleteShardSnapshots( RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, ActionListener<RepositoryData> listener ) { + // We can create a map of indexId to ShardInfo based on the old repository data.
This is later used in the cleanup + // of stale indices in combination with the Snapshot Shard Paths file + Map<String, ShardInfo> idToShardInfoMap = repositoryData.getIndices() + .values() + .stream() + .collect( + Collectors.toMap( + IndexId::getId, + indexId -> new ShardInfo(indexId, repositoryData.shardGenerations().getGens(indexId).size()) + ) + ); + if (SnapshotsService.useShardGenerations(repoMetaVersion)) { // First write the new shard state metadata (with the removed snapshot) and compute deletion targets final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeShardMetaDataAndComputeDeletesStep = new StepListener<>(); @@ -1092,7 +1173,8 @@ private void doDeleteShardSnapshots( rootBlobs, updatedRepoData, remoteStoreLockManagerFactory, - afterCleanupsListener + afterCleanupsListener, + idToShardInfoMap ); asyncCleanupUnlinkedShardLevelBlobs( repositoryData, @@ -1123,7 +1205,8 @@ private void doDeleteShardSnapshots( rootBlobs, newRepoData, remoteStoreLockManagerFactory, - afterCleanupsListener + afterCleanupsListener, + idToShardInfoMap ); final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeMetaAndComputeDeletesStep = new StepListener<>(); writeUpdatedShardMetaDataAndComputeDeletes( @@ -1148,13 +1231,25 @@ private void doDeleteShardSnapshots( } } + /** + * Cleans up the indices and the data corresponding to all their shards. + * + * @param deletedSnapshots list of snapshots being deleted + * @param foundIndices indices that are found at [base_path]/indices + * @param rootBlobs the blobs at the [base_path] + * @param updatedRepoData the new repository data after the deletion + * @param remoteStoreLockManagerFactory remote store lock manager factory used for shallow snapshots + * @param listener listener on deletion of the stale indices + * @param idToShardInfoMap map of indexId to ShardInfo + */ private void cleanupUnlinkedRootAndIndicesBlobs( Collection<SnapshotId> deletedSnapshots, Map<String, BlobContainer> foundIndices, Map<String, BlobMetadata> rootBlobs, RepositoryData updatedRepoData, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - ActionListener<Void> listener + ActionListener<Void> listener, + Map<String, ShardInfo> idToShardInfoMap ) { cleanupStaleBlobs( deletedSnapshots, @@ -1162,7 +1257,8 @@ private void cleanupUnlinkedRootAndIndicesBlobs( rootBlobs, updatedRepoData, remoteStoreLockManagerFactory, - ActionListener.map(listener, ignored -> null) + ActionListener.map(listener, ignored -> null), + idToShardInfoMap ); } @@ -1173,7 +1269,7 @@ private void asyncCleanupUnlinkedShardLevelBlobs( RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, ActionListener<Void> listener ) { - final List<String> filesToDelete = resolveFilesToDelete(oldRepositoryData, snapshotIds, deleteResults); + final List<Tuple<BlobPath, String>> filesToDelete = resolveFilesToDelete(oldRepositoryData, snapshotIds, deleteResults); if (filesToDelete.isEmpty()) { listener.onResponse(null); return; @@ -1181,10 +1277,10 @@ private void asyncCleanupUnlinkedShardLevelBlobs( try { AtomicInteger counter = new AtomicInteger(); - Collection<List<String>> subList = filesToDelete.stream() + Collection<List<Tuple<BlobPath, String>>> subList = filesToDelete.stream() .collect(Collectors.groupingBy(it -> counter.getAndIncrement() / maxShardBlobDeleteBatch)) .values(); - final BlockingQueue<List<String>> staleFilesToDeleteInBatch = new LinkedBlockingQueue<>(subList); + final BlockingQueue<List<Tuple<BlobPath, String>>> staleFilesToDeleteInBatch = new LinkedBlockingQueue<>(subList); final GroupedActionListener<Void> groupedListener = new GroupedActionListener<>( ActionListener.wrap(r -> { listener.onResponse(null); }, listener::onFailure), @@ -1287,57 +1383,67 @@ protected void releaseRemoteStoreLockAndCleanup( // When remoteStoreLockManagerFactory is non-null, while deleting the files, lock files are
also released before deletion of the respective // shallow-snap-UUID files. If it is null, we just delete the stale shard blobs. private void executeStaleShardDelete( - BlockingQueue<List<String>> staleFilesToDeleteInBatch, + BlockingQueue<List<Tuple<BlobPath, String>>> staleFilesToDeleteInBatch, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, GroupedActionListener<Void> listener ) throws InterruptedException { - List<String> filesToDelete = staleFilesToDeleteInBatch.poll(0L, TimeUnit.MILLISECONDS); - if (filesToDelete != null) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { - try { - // filtering files for which remote store lock release and cleanup succeeded, - // remaining files for which it failed will be retried in next snapshot delete run. - List<String> eligibleFilesToDelete = new ArrayList<>(); - for (String fileToDelete : filesToDelete) { - if (fileToDelete.contains(SHALLOW_SNAPSHOT_PREFIX)) { - String[] fileToDeletePath = fileToDelete.split("/"); - String indexId = fileToDeletePath[1]; - String shardId = fileToDeletePath[2]; - String shallowSnapBlob = fileToDeletePath[3]; - String snapshotUUID = extractShallowSnapshotUUID(shallowSnapBlob).orElseThrow(); - BlobContainer shardContainer = blobStore().blobContainer(indicesPath().add(indexId).add(shardId)); - try { - releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); - eligibleFilesToDelete.add(fileToDelete); - } catch (Exception e) { - logger.error( - "Failed to release lock or cleanup shard for indexID {}, shardID {} " + "and snapshot {}", - indexId, - shardId, - snapshotUUID - ); - } - } else { - eligibleFilesToDelete.add(fileToDelete); + List<Tuple<BlobPath, String>> filesToDelete = staleFilesToDeleteInBatch.poll(0L, TimeUnit.MILLISECONDS); + if (filesToDelete == null) { + return; + } + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> { + try { + // filtering files for which remote store lock release and cleanup succeeded, + // remaining files for which it failed will be retried in next snapshot delete run.
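+ // Illustration with hypothetical ids: a tuple of (BlobPath "indices/ZxM4dl0HTn/0/", "snap-abc123.dat") + // resolves to the root-relative path "indices/ZxM4dl0HTn/0/snap-abc123.dat" via blobPath.buildAsString() + blobName, + // which is the form the bulk delete against rootBlobContainer() further below expects.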
+ List<String> eligibleFilesToDelete = new ArrayList<>(); + for (Tuple<BlobPath, String> fileToDelete : filesToDelete) { + BlobPath blobPath = fileToDelete.v1(); + String blobName = fileToDelete.v2(); + boolean deleteBlob = false; + if (blobName.startsWith(SHALLOW_SNAPSHOT_PREFIX)) { + String snapshotUUID = extractShallowSnapshotUUID(blobName).orElseThrow(); + String[] parts = blobPath.toArray(); + // For fixed, the parts would look like [<base_path>,"indices","<index_id>","<shard_id>"] + // For hashed_prefix, the parts would look like ["j01010001010",<base_path>,"indices","<index_id>","<shard_id>"] + // For hashed_infix, the parts would look like [<base_path>,"j01010001010","indices","<index_id>","<shard_id>"] + int partLength = parts.length; + String indexId = parts[partLength - 2]; + String shardId = parts[partLength - 1]; + BlobContainer shardContainer = blobStore().blobContainer(blobPath); + try { + releaseRemoteStoreLockAndCleanup(shardId, snapshotUUID, shardContainer, remoteStoreLockManagerFactory); + deleteBlob = true; + } catch (Exception e) { + logger.error( + "Failed to release lock or cleanup shard for indexID {}, shardID {} and snapshot {}", + indexId, + shardId, + snapshotUUID + ); } + } else { + deleteBlob = true; + } + if (deleteBlob) { + eligibleFilesToDelete.add(blobPath.buildAsString() + blobName); } - // Deleting the shard blobs - deleteFromContainer(blobContainer(), eligibleFilesToDelete); - l.onResponse(null); - } catch (Exception e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}] Failed to delete following blobs during snapshot delete : {}", - metadata.name(), - filesToDelete - ), - e - ); - l.onFailure(e); } - executeStaleShardDelete(staleFilesToDeleteInBatch, remoteStoreLockManagerFactory, listener); - })); - } + // Deleting the shard blobs + deleteFromContainer(rootBlobContainer(), eligibleFilesToDelete); + l.onResponse(null); + } catch (Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "[{}] Failed to delete following blobs during snapshot delete : {}", + metadata.name(), + filesToDelete + ), + e + ); + l.onFailure(e); + } + executeStaleShardDelete(staleFilesToDeleteInBatch, remoteStoreLockManagerFactory, listener); + })); } // updates the shard state metadata for shards of a snapshot that is to be deleted. Also computes the files to be cleaned up. @@ -1479,26 +1585,30 @@ public void onFailure(Exception ex) { } } - private List<String> resolveFilesToDelete( + /** + * Resolves the list of files that should be deleted during a snapshot deletion operation. + * This method combines files to be deleted from shard-level metadata and index-level metadata.
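+ * For example (hypothetical values), a shard-level delete result contributes tuples such as + * (shardPath(indexId, 0), "snap-abc123.dat"), while removed index metadata contributes + * (indexPath(indexId), INDEX_METADATA_FORMAT.blobName(metaId)); each pair is later joined into a + * root-relative path as v1().buildAsString() + v2().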
+ * + * @param oldRepositoryData The repository data before the snapshot deletion + * @param snapshotIds The IDs of the snapshots being deleted + * @param deleteResults The results of removing snapshots from shard-level metadata + * @return A list of tuples, each containing a blob path and the name of a blob to be deleted + */ + private List> resolveFilesToDelete( RepositoryData oldRepositoryData, Collection snapshotIds, Collection deleteResults ) { - final String basePath = basePath().buildAsString(); - final int basePathLen = basePath.length(); final Map> indexMetaGenerations = oldRepositoryData.indexMetaDataToRemoveAfterRemovingSnapshots( snapshotIds ); return Stream.concat(deleteResults.stream().flatMap(shardResult -> { - final String shardPath = shardContainer(shardResult.indexId, shardResult.shardId).path().buildAsString(); - return shardResult.blobsToDelete.stream().map(blob -> shardPath + blob); + final BlobPath shardPath = shardPath(shardResult.indexId, shardResult.shardId); + return shardResult.blobsToDelete.stream().map(blob -> Tuple.tuple(shardPath, blob)); }), indexMetaGenerations.entrySet().stream().flatMap(entry -> { - final String indexContainerPath = indexContainer(entry.getKey()).path().buildAsString(); - return entry.getValue().stream().map(id -> indexContainerPath + INDEX_METADATA_FORMAT.blobName(id)); - })).map(absolutePath -> { - assert absolutePath.startsWith(basePath); - return absolutePath.substring(basePathLen); - }).collect(Collectors.toList()); + final BlobPath indexPath = indexPath(entry.getKey()); + return entry.getValue().stream().map(id -> Tuple.tuple(indexPath, INDEX_METADATA_FORMAT.blobName(id))); + })).collect(Collectors.toList()); } /** @@ -1513,6 +1623,7 @@ private List resolveFilesToDelete( * @param rootBlobs all blobs found directly under the repository root * @param newRepoData new repository data that was just written * @param remoteStoreLockManagerFactory RemoteStoreLockManagerFactory to be used for cleaning up remote store lock files. + * @param idToShardInfoMap map of indexId to ShardInfo * @param listener listener to invoke with the combined {@link DeleteResult} of all blobs removed in this operation */ private void cleanupStaleBlobs( @@ -1521,7 +1632,8 @@ private void cleanupStaleBlobs( Map rootBlobs, RepositoryData newRepoData, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - ActionListener listener + ActionListener listener, + Map idToShardInfoMap ) { final GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { DeleteResult deleteResult = DeleteResult.ZERO; @@ -1546,10 +1658,27 @@ private void cleanupStaleBlobs( if (foundIndices.keySet().equals(survivingIndexIds)) { groupedListener.onResponse(DeleteResult.ZERO); } else { - cleanupStaleIndices(foundIndices, survivingIndexIds, remoteStoreLockManagerFactory, groupedListener); + Map snapshotShardPaths = getSnapshotShardPaths(); + cleanupStaleIndices( + foundIndices, + survivingIndexIds, + remoteStoreLockManagerFactory, + groupedListener, + snapshotShardPaths, + idToShardInfoMap + ); } } + private Map getSnapshotShardPaths() { + try { + return snapshotShardPathBlobContainer().listBlobs(); + } catch (IOException ex) { + logger.warn(new ParameterizedMessage("Repository [{}] Failed to get the snapshot shard paths", metadata.name()), ex); + } + return Collections.emptyMap(); + } + /** * Runs cleanup actions on the repository. Increments the repository state id by one before executing any modifications on the * repository. 
If remoteStoreLockManagerFactory is not null, remote store lock files are released when deleting the respective @@ -1602,7 +1731,8 @@ public void cleanup( rootBlobs, repositoryData, remoteStoreLockManagerFactory, - ActionListener.map(listener, RepositoryCleanupResult::new) + ActionListener.map(listener, RepositoryCleanupResult::new), + Collections.emptyMap() ), listener::onFailure ) @@ -1690,11 +1820,13 @@ private List cleanupStaleRootFiles( return Collections.emptyList(); } - private void cleanupStaleIndices( + void cleanupStaleIndices( Map foundIndices, Set survivingIndexIds, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - GroupedActionListener listener + GroupedActionListener listener, + Map snapshotShardPaths, + Map idToShardInfoMap ) { final GroupedActionListener groupedListener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { DeleteResult deleteResult = DeleteResult.ZERO; @@ -1718,7 +1850,13 @@ private void cleanupStaleIndices( foundIndices.size() - survivingIndexIds.size() ); for (int i = 0; i < workers; ++i) { - executeOneStaleIndexDelete(staleIndicesToDelete, remoteStoreLockManagerFactory, groupedListener); + executeOneStaleIndexDelete( + staleIndicesToDelete, + remoteStoreLockManagerFactory, + groupedListener, + snapshotShardPaths, + idToShardInfoMap + ); } } catch (Exception e) { // TODO: We shouldn't be blanket catching and suppressing all exceptions here and instead handle them safely upstream. @@ -1738,58 +1876,219 @@ private static boolean isIndexPresent(ClusterService clusterService, String inde return false; } + /** + * Executes the deletion of a single stale index. + * + * @param staleIndicesToDelete Queue of stale indices to delete + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @param listener Listener for grouped delete actions + * @param snapshotShardPaths Map of snapshot shard paths and their metadata + * @param idToShardInfoMap Map of indexId to ShardInfo + * @throws InterruptedException if the thread is interrupted while waiting + */ private void executeOneStaleIndexDelete( BlockingQueue> staleIndicesToDelete, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, - GroupedActionListener listener + GroupedActionListener listener, + Map snapshotShardPaths, + Map idToShardInfoMap ) throws InterruptedException { Map.Entry indexEntry = staleIndicesToDelete.poll(0L, TimeUnit.MILLISECONDS); - if (indexEntry != null) { - final String indexSnId = indexEntry.getKey(); - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> { - DeleteResult deleteResult = DeleteResult.ZERO; - try { - logger.debug("[{}] Found stale index [{}]. 
Cleaning it up", metadata.name(), indexSnId); - if (remoteStoreLockManagerFactory != null) { - final Map shardBlobs = indexEntry.getValue().children(); - for (Map.Entry shardBlob : shardBlobs.entrySet()) { - for (String blob : shardBlob.getValue().listBlobs().keySet()) { - final Optional snapshotUUID = extractShallowSnapshotUUID(blob); - if (snapshotUUID.isPresent()) { - releaseRemoteStoreLockAndCleanup( - shardBlob.getKey(), - snapshotUUID.get(), - shardBlob.getValue(), - remoteStoreLockManagerFactory - ); - } - } - } - } - // Deleting the index folder - deleteResult = indexEntry.getValue().delete(); - logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); - } catch (IOException e) { - logger.warn( - () -> new ParameterizedMessage( - "[{}] index {} is no longer part of any snapshots in the repository, " - + "but failed to clean up their index folders", - metadata.name(), - indexSnId - ), - e - ); - } catch (Exception e) { - assert false : e; - logger.warn(new ParameterizedMessage("[{}] Exception during single stale index delete", metadata.name()), e); + if (indexEntry == null) { + return; + } + final String indexSnId = indexEntry.getKey(); + threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> { + try { + logger.debug("[{}] Found stale index [{}]. Cleaning it up", metadata.name(), indexSnId); + List matchingShardPaths = findMatchingShardPaths(indexSnId, snapshotShardPaths); + Optional highestGenShardPaths = findHighestGenerationShardPaths(matchingShardPaths); + + // The shardInfo can be null for 1) snapshots that pre-dates the hashed prefix snapshots. + // 2) Snapshot shard paths file upload failed + // In such cases, we fallback to fixed_path for cleanup of the data. + ShardInfo shardInfo = getShardInfo(highestGenShardPaths, idToShardInfoMap, indexSnId); + + if (remoteStoreLockManagerFactory != null) { + cleanupRemoteStoreLocks(indexEntry, shardInfo, remoteStoreLockManagerFactory); } - executeOneStaleIndexDelete(staleIndicesToDelete, remoteStoreLockManagerFactory, listener); + // Deletes the shard level data for the underlying index based on the shardInfo that was obtained above. + DeleteResult deleteResult = deleteShardData(shardInfo); + + // If there are matchingShardPaths, then we delete them after we have deleted the shard data. + deleteResult = deleteResult.add(cleanUpStaleSnapshotShardPathsFile(matchingShardPaths, snapshotShardPaths)); + + // Finally, we delete the [base_path]/indexId folder + deleteResult = deleteResult.add(indexEntry.getValue().delete()); // Deleting the index folder + logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); return deleteResult; - })); + } catch (IOException e) { + logger.warn( + () -> new ParameterizedMessage( + "[{}] index {} is no longer part of any snapshots in the repository, " + + "but failed to clean up their index folders", + metadata.name(), + indexSnId + ), + e + ); + return DeleteResult.ZERO; + } catch (Exception e) { + assert false : e; + logger.warn(new ParameterizedMessage("[{}] Exception during single stale index delete", metadata.name()), e); + return DeleteResult.ZERO; + } finally { + executeOneStaleIndexDelete( + staleIndicesToDelete, + remoteStoreLockManagerFactory, + listener, + snapshotShardPaths, + idToShardInfoMap + ); + } + })); + } + + /** + * Finds and returns a list of shard paths that match the given index ID. 
+ * + * @param indexId The ID of the index to match + * @param snapshotShardPaths Map of snapshot shard paths and their metadata + * @return List of matching shard paths + */ + private List<String> findMatchingShardPaths(String indexId, Map<String, BlobMetadata> snapshotShardPaths) { + return snapshotShardPaths.keySet().stream().filter(s -> s.startsWith(indexId)).collect(Collectors.toList()); + } + + /** + * Finds the shard path with the highest generation number (the shard count encoded in the blob name) from the + * given list of matching shard paths. + * + * @param matchingShardPaths List of shard paths that match a specific criterion + * @return An Optional containing the shard path with the highest generation number, or empty if the list is empty + */ + private Optional<String> findHighestGenerationShardPaths(List<String> matchingShardPaths) { + return matchingShardPaths.stream() + .map(s -> s.split("\\" + SnapshotShardPaths.DELIMITER)) + .sorted((a, b) -> Integer.parseInt(b[2]) - Integer.parseInt(a[2])) + .map(parts -> String.join(SnapshotShardPaths.DELIMITER, parts)) + .findFirst(); + } + + /** + * Cleans up remote store locks for a given index entry. + * + * @param indexEntry The index entry containing the blob container + * @param shardInfo ShardInfo for the IndexId being cleaned up + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @throws IOException If an I/O error occurs during the cleanup process + */ + private void cleanupRemoteStoreLocks( + Map.Entry<String, BlobContainer> indexEntry, + ShardInfo shardInfo, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + if (shardInfo == null) { + releaseRemoteStoreLocksAndCleanup(indexEntry.getValue().children(), remoteStoreLockManagerFactory); + } else { + Map<String, BlobContainer> shardContainers = new HashMap<>(shardInfo.getShardCount()); + for (int i = 0; i < shardInfo.getShardCount(); i++) { + shardContainers.put(String.valueOf(i), shardContainer(shardInfo.getIndexId(), i)); + } + releaseRemoteStoreLocksAndCleanup(shardContainers, remoteStoreLockManagerFactory); } } + /** + * Releases remote store locks and performs cleanup for each shard blob. + * + * @param shardBlobs Map of shard IDs to their corresponding BlobContainers + * @param remoteStoreLockManagerFactory Factory for creating remote store lock managers + * @throws IOException If an I/O error occurs during the release and cleanup process + */ + void releaseRemoteStoreLocksAndCleanup( + Map<String, BlobContainer> shardBlobs, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory + ) throws IOException { + for (Map.Entry<String, BlobContainer> shardBlob : shardBlobs.entrySet()) { + for (String blob : shardBlob.getValue().listBlobs().keySet()) { + final Optional<String> snapshotUUID = extractShallowSnapshotUUID(blob); + if (snapshotUUID.isPresent()) { + releaseRemoteStoreLockAndCleanup( + shardBlob.getKey(), + snapshotUUID.get(), + shardBlob.getValue(), + remoteStoreLockManagerFactory + ); + } + } + } + } + + /** + * Deletes shard data for the provided ShardInfo object. + * + * @param shardInfo The ShardInfo object containing information about the shards to be deleted. + * @return A DeleteResult object representing the result of the deletion operation. + * @throws IOException If an I/O error occurs during the deletion process.
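+ * For example, given a hypothetical ShardInfo with a shard count of 3, this deletes the containers returned by + * shardContainer(indexId, 0) through shardContainer(indexId, 2), wherever the index's shard path type places them.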
+ */ + private DeleteResult deleteShardData(ShardInfo shardInfo) throws IOException { + // If the provided ShardInfo is null, return a zero DeleteResult + if (shardInfo == null) { + return DeleteResult.ZERO; + } + + // Initialize the DeleteResult with zero values + DeleteResult deleteResult = DeleteResult.ZERO; + + // Iterate over the shards and delete each shard's data + for (int i = 0; i < shardInfo.getShardCount(); i++) { + // Call the delete method on the shardContainer and accumulate the result + deleteResult = deleteResult.add(shardContainer(shardInfo.getIndexId(), i).delete()); + } + + // Return the accumulated DeleteResult + return deleteResult; + } + + /** + * Retrieves the ShardInfo object based on the provided highest generation shard paths, + * index ID, and the mapping of index IDs to ShardInfo objects. + * + * @param highestGenShardPaths The optional highest generation shard path. + * @param idToShardInfoMap A map containing index IDs and their corresponding ShardInfo objects. + * @param indexId The index ID for which the ShardInfo object is needed. + * @return The ShardInfo object with the highest shard count, or null if no ShardInfo is available. + */ + private ShardInfo getShardInfo(Optional<String> highestGenShardPaths, Map<String, ShardInfo> idToShardInfoMap, String indexId) { + // Extract the ShardInfo object from the highest generation shard path, if present + ShardInfo shardInfoFromPath = highestGenShardPaths.map(SnapshotShardPaths::parseShardPath).orElse(null); + + // Retrieve the ShardInfo object from the idToShardInfoMap using the indexId + ShardInfo shardInfoFromMap = idToShardInfoMap.get(indexId); + + // If shardInfoFromPath is null, return shardInfoFromMap (which could also be null) + if (shardInfoFromPath == null) { + return shardInfoFromMap; + } + + // If shardInfoFromMap is null, return shardInfoFromPath (which is known to be non-null at this point) + if (shardInfoFromMap == null) { + return shardInfoFromPath; + } + + // If both shardInfoFromPath and shardInfoFromMap are non-null, + // return the ShardInfo object with the higher shard count + return shardInfoFromPath.getShardCount() >= shardInfoFromMap.getShardCount() ? shardInfoFromPath : shardInfoFromMap; + } + + private DeleteResult cleanUpStaleSnapshotShardPathsFile(List<String> matchingShardPaths, Map<String, BlobMetadata> snapshotShardPaths) + throws IOException { + deleteFromContainer(snapshotShardPathBlobContainer(), matchingShardPaths); + long totalBytes = matchingShardPaths.stream().mapToLong(s -> snapshotShardPaths.get(s).length()).sum(); + return new DeleteResult(matchingShardPaths.size(), totalBytes); + } + @Override public void finalizeSnapshot( final ShardGenerations shardGenerations, @@ -1864,6 +2163,10 @@ public void finalizeSnapshot( indexMetas, indexMetaIdentifiers ); + // The snapshot shard paths file would be uploaded for new index ids or index ids where the shard gen count (a.k.a. + // number_of_shards) has increased. + Set<String> updatedIndexIds = writeNewIndexShardPaths(existingRepositoryData, updatedRepositoryData, snapshotId); + cleanupRedundantSnapshotShardPaths(updatedIndexIds); writeIndexGen( updatedRepositoryData, repositoryStateId, @@ -1926,21 +2229,130 @@ public void finalizeSnapshot( }, onUpdateFailure); } + /** + * This method cleans up redundant snapshot shard paths files for index ids whose number of shards has increased. + * This happens when a new index with the same name as one that already exists in the repository's snapshots is + * snapshotted with a higher shard count.
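+ * For example (hypothetical ids), once a snapshot grows index id "ZxM4dl0HTn" (name "logs") from 2 to 5 shards, + * writeIndexShardPaths writes a new "ZxM4dl0HTn.logs.5.1.1" blob, and the older "ZxM4dl0HTn.logs.2.1.1" blob for + * the same index id becomes stale and is deleted here.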
+ */ + private void cleanupRedundantSnapshotShardPaths(Set updatedShardPathsIndexIds) { + Set updatedIndexIds = updatedShardPathsIndexIds.stream() + .map(s -> s.split("\\" + SnapshotShardPaths.DELIMITER)[0]) + .collect(Collectors.toSet()); + Set indexIdShardPaths = getSnapshotShardPaths().keySet(); + List staleShardPaths = indexIdShardPaths.stream().filter(s -> updatedShardPathsIndexIds.contains(s) == false).filter(s -> { + String indexId = s.split("\\" + SnapshotShardPaths.DELIMITER)[0]; + return updatedIndexIds.contains(indexId); + }).collect(Collectors.toList()); + try { + deleteFromContainer(snapshotShardPathBlobContainer(), staleShardPaths); + } catch (IOException e) { + logger.warn( + new ParameterizedMessage( + "Repository [{}] Exception during snapshot stale index deletion {}", + metadata.name(), + staleShardPaths + ), + e + ); + } + } + + private Set writeNewIndexShardPaths( + RepositoryData existingRepositoryData, + RepositoryData updatedRepositoryData, + SnapshotId snapshotId + ) { + Set updatedIndexIds = new HashSet<>(); + Set indicesToUpdate = new HashSet<>(updatedRepositoryData.getIndices().values()); + for (IndexId indexId : indicesToUpdate) { + if (indexId.getShardPathType() == PathType.FIXED.getCode()) { + continue; + } + int oldShardCount = existingRepositoryData.shardGenerations().getGens(indexId).size(); + int newShardCount = updatedRepositoryData.shardGenerations().getGens(indexId).size(); + if (newShardCount > oldShardCount) { + String shardPathsBlobName = writeIndexShardPaths(indexId, snapshotId, newShardCount); + if (Objects.nonNull(shardPathsBlobName)) { + updatedIndexIds.add(shardPathsBlobName); + } + } + } + return updatedIndexIds; + } + + String writeIndexShardPaths(IndexId indexId, SnapshotId snapshotId, int shardCount) { + try { + List paths = getShardPaths(indexId, shardCount); + int pathType = indexId.getShardPathType(); + int pathHashAlgorithm = FNV_1A_COMPOSITE_1.getCode(); + String blobName = String.join( + SnapshotShardPaths.DELIMITER, + indexId.getId(), + indexId.getName(), + String.valueOf(shardCount), + String.valueOf(pathType), + String.valueOf(pathHashAlgorithm) + ); + SnapshotShardPaths shardPaths = new SnapshotShardPaths( + paths, + indexId.getId(), + indexId.getName(), + shardCount, + PathType.fromCode(pathType), + PathHashAlgorithm.fromCode(pathHashAlgorithm) + ); + SNAPSHOT_SHARD_PATHS_FORMAT.write(shardPaths, snapshotShardPathBlobContainer(), blobName); + logShardPathsOperationSuccess(indexId, snapshotId); + return blobName; + } catch (IOException e) { + logShardPathsOperationWarning(indexId, snapshotId, e); + } + return null; + } + + private List getShardPaths(IndexId indexId, int shardCount) { + List paths = new ArrayList<>(); + for (int shardId = 0; shardId < shardCount; shardId++) { + BlobPath shardPath = shardPath(indexId, shardId); + paths.add(shardPath.buildAsString()); + } + return paths; + } + + private void logShardPathsOperationSuccess(IndexId indexId, SnapshotId snapshotId) { + logger.trace( + () -> new ParameterizedMessage( + "Repository [{}] successfully wrote shard paths for index [{}] in snapshot [{}]", + metadata.name(), + indexId.getName(), + snapshotId.getName() + ) + ); + } + + private void logShardPathsOperationWarning(IndexId indexId, SnapshotId snapshotId, @Nullable Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "Repository [{}] Failed to write shard paths for index [{}] in snapshot [{}]", + metadata.name(), + indexId.getName(), + snapshotId.getName() + ), + e + ); + } + // Delete all old shard gen 
blobs that aren't referenced any longer as a result of moving to the updated repository data private void cleanupOldShardGens(RepositoryData existingRepositoryData, RepositoryData updatedRepositoryData) { final List<String> toDelete = new ArrayList<>(); - final int prefixPathLen = basePath().buildAsString().length(); updatedRepositoryData.shardGenerations() .obsoleteShardGenerations(existingRepositoryData.shardGenerations()) .forEach( (indexId, gens) -> gens.forEach( - (shardId, oldGen) -> toDelete.add( - shardContainer(indexId, shardId).path().buildAsString().substring(prefixPathLen) + INDEX_FILE_PREFIX + oldGen - ) + (shardId, oldGen) -> toDelete.add(shardPath(indexId, shardId).buildAsString() + INDEX_FILE_PREFIX + oldGen) ) ); try { - deleteFromContainer(blobContainer(), toDelete); + deleteFromContainer(rootBlobContainer(), toDelete); } catch (Exception e) { logger.warn("Failed to clean up old shard generation blobs", e); } @@ -1987,11 +2399,15 @@ private void deleteFromContainer(BlobContainer container, List<String> blobs) th } private BlobPath indicesPath() { - return basePath().add("indices"); + return basePath().add(INDICES_DIR); } private BlobContainer indexContainer(IndexId indexId) { - return blobStore().blobContainer(indicesPath().add(indexId.getId())); + return blobStore().blobContainer(indexPath(indexId)); + } + + private BlobPath indexPath(IndexId indexId) { + return indicesPath().add(indexId.getId()); } private BlobContainer shardContainer(IndexId indexId, ShardId shardId) { @@ -1999,7 +2415,17 @@ private BlobContainer shardContainer(IndexId indexId, ShardId shardId) { } public BlobContainer shardContainer(IndexId indexId, int shardId) { - return blobStore().blobContainer(indicesPath().add(indexId.getId()).add(Integer.toString(shardId))); + return blobStore().blobContainer(shardPath(indexId, shardId)); + } + + private BlobPath shardPath(IndexId indexId, int shardId) { + PathType pathType = PathType.fromCode(indexId.getShardPathType()); + SnapshotShardPathInput shardPathInput = new SnapshotShardPathInput.Builder().basePath(basePath()) + .indexUUID(indexId.getId()) + .shardId(String.valueOf(shardId)) + .build(); + PathHashAlgorithm pathHashAlgorithm = pathType != PathType.FIXED ?
FNV_1A_COMPOSITE_1 : null; + return pathType.path(shardPathInput, pathHashAlgorithm); } /** @@ -2086,7 +2512,7 @@ private BlobContainer testContainer(String seed) { BlobPath testBlobPath; if (prefixModeVerification == true) { BasePathInput pathInput = BasePathInput.builder().basePath(basePath()).indexUUID(seed).build(); - testBlobPath = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + testBlobPath = PathType.HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); } else { testBlobPath = basePath(); } @@ -2174,11 +2600,12 @@ private void doGetRepositoryData(ActionListener listener) { loaded = repositoryDataFromCachedEntry(cached); } else { loaded = getRepositoryData(genToLoad); + Version minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); // We can cache serialized in the most recent version here without regard to the actual repository metadata version // since we're only caching the information that we just wrote and thus won't accidentally cache any information that // isn't safe cacheRepositoryData( - BytesReference.bytes(loaded.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT)), + BytesReference.bytes(loaded.snapshotsToXContent(XContentFactory.jsonBuilder(), Version.CURRENT, minNodeVersion)), genToLoad ); } @@ -2569,8 +2996,9 @@ public void onFailure(Exception e) { } final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen); logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob); + Version minNodeVersion = clusterService.state().nodes().getMinNodeVersion(); final BytesReference serializedRepoData = BytesReference.bytes( - newRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), version) + newRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), version, minNodeVersion) ); writeAtomic(blobContainer(), indexBlob, serializedRepoData, true); maybeWriteIndexLatest(newGen); diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java index 8127bf8c2a2a2..630048c61785d 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/ConfigBlobStoreFormat.java @@ -76,4 +76,8 @@ public void writeAsyncWithUrgentPriority(T obj, BlobContainer blobContainer, Str ((AsyncMultiStreamBlobContainer) blobContainer).asyncBlobUpload(remoteTransferContainer.createWriteContext(), listener); } } + + public void write(T obj, BlobContainer blobContainer, String name) throws IOException { + write(obj, blobContainer, name, new NoneCompressor(), ToXContent.EMPTY_PARAMS, XContentType.JSON, null, null); + } } diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index f102289160b71..08c30ea503a6d 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -87,6 +87,7 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.remote.filecache.FileCacheStats; @@ -1345,6 +1346,7 @@ private static IndexMetadata 
addSnapshotToIndexSettings(IndexMetadata metadata, .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID()) .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName()) .put(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.getKey(), indexId.getId()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_SHARD_PATH_TYPE.getKey(), PathType.fromCode(indexId.getShardPathType())) .build(); return IndexMetadata.builder(metadata).settings(newSettings).build(); } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java new file mode 100644 index 0000000000000..88af14e2232f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotShardPaths.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.snapshots; + +import org.opensearch.core.xcontent.ToXContent; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.repositories.IndexId; + +import java.io.IOException; +import java.util.List; + +/** + * Snapshot Shard path information. + * + * @opensearch.internal + */ +public class SnapshotShardPaths implements ToXContent { + + public static final String DIR = "snapshot_shard_paths"; + + public static final String DELIMITER = "."; + + public static final String FILE_NAME_FORMAT = "%s"; + + private static final String PATHS_FIELD = "paths"; + private static final String INDEX_ID_FIELD = "indexId"; + private static final String INDEX_NAME_FIELD = "indexName"; + private static final String NUMBER_OF_SHARDS_FIELD = "number_of_shards"; + private static final String SHARD_PATH_TYPE_FIELD = "shard_path_type"; + private static final String SHARD_PATH_HASH_ALGORITHM_FIELD = "shard_path_hash_algorithm"; + + private final List paths; + private final String indexId; + private final String indexName; + private final int numberOfShards; + private final PathType shardPathType; + private final PathHashAlgorithm shardPathHashAlgorithm; + + public SnapshotShardPaths( + List paths, + String indexId, + String indexName, + int numberOfShards, + PathType shardPathType, + PathHashAlgorithm shardPathHashAlgorithm + ) { + assert !paths.isEmpty() : "paths must not be empty"; + assert indexId != null && !indexId.isEmpty() : "indexId must not be empty"; + assert indexName != null && !indexName.isEmpty() : "indexName must not be empty"; + assert numberOfShards > 0 : "numberOfShards must be > 0"; + assert shardPathType != null : "shardPathType must not be null"; + assert shardPathHashAlgorithm != null : "shardPathHashAlgorithm must not be null"; + + this.paths = paths; + this.indexId = indexId; + this.indexName = indexName; + this.numberOfShards = numberOfShards; + this.shardPathType = shardPathType; + this.shardPathHashAlgorithm = shardPathHashAlgorithm; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(INDEX_ID_FIELD, indexId); + builder.field(INDEX_NAME_FIELD, indexName); + builder.field(NUMBER_OF_SHARDS_FIELD, numberOfShards); + builder.field(SHARD_PATH_TYPE_FIELD, 
shardPathType.getCode()); + builder.field(SHARD_PATH_HASH_ALGORITHM_FIELD, shardPathHashAlgorithm.getCode()); + builder.startArray(PATHS_FIELD); + for (String path : paths) { + builder.value(path); + } + builder.endArray(); + return builder; + } + + public static SnapshotShardPaths fromXContent(XContentParser ignored) { + throw new UnsupportedOperationException("SnapshotShardPaths.fromXContent() is not supported"); + } + + /** + * Parses a shard path string and extracts relevant shard information. + * + * @param shardPath The shard path string to parse. Expected format is: + * [index_id].[index_name].[shard_count].[path_type_code].[path_hash_algorithm_code], joined on {@code DELIMITER} + * @return A {@link ShardInfo} object containing the parsed index ID and shard count. + * @throws IllegalArgumentException if the shard path format is invalid or cannot be parsed. + */ + public static ShardInfo parseShardPath(String shardPath) { + String[] parts = shardPath.split("\\" + SnapshotShardPaths.DELIMITER); + if (parts.length != 5) { + throw new IllegalArgumentException("Invalid shard path format: " + shardPath); + } + try { + IndexId indexId = new IndexId(parts[1], parts[0], Integer.parseInt(parts[3])); + int shardCount = Integer.parseInt(parts[2]); + return new ShardInfo(indexId, shardCount); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Invalid shard path format: " + shardPath, e); + } + } + + /** + * Represents parsed information from a shard path. + * This class encapsulates the index ID and shard count extracted from a shard path string. + */ + public static class ShardInfo { + /** The ID of the index associated with this shard. */ + private final IndexId indexId; + + /** The total number of shards for this index. */ + private final int shardCount; + + /** + * Constructs a new ShardInfo instance. + * + * @param indexId The ID of the index associated with this shard. + * @param shardCount The total number of shards for this index. + */ + public ShardInfo(IndexId indexId, int shardCount) { + this.indexId = indexId; + this.shardCount = shardCount; + } + + public IndexId getIndexId() { + return indexId; + } + + public int getShardCount() { + return shardCount; + } + } +} diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index ed91e9114e106..998fa0161550c 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -138,6 +138,7 @@ import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; import static org.opensearch.repositories.blobstore.BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY; import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHALLOW_SNAPSHOT_V2; +import static org.opensearch.repositories.blobstore.BlobStoreRepository.SHARD_PATH_TYPE; import static org.opensearch.snapshots.SnapshotUtils.validateSnapshotsBackingAnyIndex; /** @@ -527,9 +528,13 @@ public ClusterState execute(ClusterState currentState) { logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); + int pathType = clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.CURRENT) + ?
SHARD_PATH_TYPE.get(repository.getMetadata().settings()).getCode() + : IndexId.DEFAULT_SHARD_PATH_TYPE; final List indexIds = repositoryData.resolveNewIndices( indices, - getInFlightIndexIds(runningSnapshots, repositoryName) + getInFlightIndexIds(runningSnapshots, repositoryName), + pathType ); final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); final Map shards = shards( @@ -668,7 +673,8 @@ public void createSnapshotV2(final CreateSnapshotRequest request, final ActionLi final List indexIds = repositoryData.resolveNewIndices( indices, - getInFlightIndexIds(runningSnapshots, repositoryName) + getInFlightIndexIds(runningSnapshots, repositoryName), + IndexId.DEFAULT_SHARD_PATH_TYPE ); final Version version = minCompatibleVersion(currentState.nodes().getMinNodeVersion(), repositoryData, null); final ShardGenerations shardGenerations = buildShardsGenerationFromRepositoryData( @@ -1348,7 +1354,14 @@ public ClusterState execute(ClusterState currentState) { assert entry.shards().isEmpty(); hadAbortedInitializations = true; } else { - final List indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap()); + int pathType = clusterService.state().nodes().getMinNodeVersion().onOrAfter(Version.CURRENT) + ? SHARD_PATH_TYPE.get(repository.getMetadata().settings()).getCode() + : IndexId.DEFAULT_SHARD_PATH_TYPE; + final List indexIds = repositoryData.resolveNewIndices( + indices, + Collections.emptyMap(), + pathType + ); // Replace the snapshot that was just initialized final Map shards = shards( snapshots, diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java index c3f52f3976414..e1110f51ecd3f 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreEnumsTests.java @@ -14,6 +14,7 @@ import org.opensearch.index.remote.RemoteStoreEnums.DataType; import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.index.remote.RemoteStorePathStrategy.PathInput; +import org.opensearch.index.remote.RemoteStorePathStrategy.SnapshotShardPathInput; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; @@ -597,6 +598,47 @@ public void testGeneratePathForHashedInfixType() { assertTrue(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual.startsWith(expected)); } + public void testGeneratePathForSnapshotShardPathInput() { + BlobPath blobPath = BlobPath.cleanPath().add("xjsdhj").add("ddjsha").add("yudy7sd").add("32hdhua7").add("89jdij"); + String indexUUID = "dsdkjsu8832njn"; + String shardId = "10"; + SnapshotShardPathInput pathInput = SnapshotShardPathInput.builder() + .basePath(blobPath) + .indexUUID(indexUUID) + .shardId(shardId) + .build(); + + // FIXED PATH + BlobPath result = FIXED.path(pathInput, null); + String expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + String actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_PREFIX - FNV_1A_COMPOSITE_1 + result = HASHED_PREFIX.path(pathInput, FNV_1A_COMPOSITE_1); + expected = "_11001000010110/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", 
expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_PREFIX - FNV_1A_BASE64 + result = HASHED_PREFIX.path(pathInput, FNV_1A_BASE64); + expected = "_yFiSl_VGGM/xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_INFIX - FNV_1A_COMPOSITE_1 + result = HASHED_INFIX.path(pathInput, FNV_1A_COMPOSITE_1); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/_11001000010110/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + + // HASHED_INFIX - FNV_1A_BASE64 + result = HASHED_INFIX.path(pathInput, FNV_1A_BASE64); + expected = "xjsdhj/ddjsha/yudy7sd/32hdhua7/89jdij/_yFiSl_VGGM/indices/dsdkjsu8832njn/10/"; + actual = result.buildAsString(); + assertEquals(new ParameterizedMessage("expected={} actual={}", expected, actual).getFormattedMessage(), actual, expected); + } + private String derivePath(String basePath, PathInput pathInput) { return "".equals(basePath) ? String.join( diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java index 217ffe804573e..e4c64e16fb5be 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStorePathStrategyTests.java @@ -82,6 +82,46 @@ public void testFixedSubPath() { .dataType(DATA) .build(); assertEquals(BlobPath.cleanPath().add(INDEX_UUID).add(SHARD_ID).add(TRANSLOG.getName()).add(DATA.getName()), input2.fixedSubPath()); + } + + public void testSnapshotShardPathInput() { + assertThrows(NullPointerException.class, () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().build()); + assertThrows( + NullPointerException.class, + () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().basePath(BASE_PATH).build() + ); + assertThrows( + NullPointerException.class, + () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().indexUUID(INDEX_UUID).build() + ); + assertThrows(NullPointerException.class, () -> RemoteStorePathStrategy.SnapshotShardPathInput.builder().shardId(SHARD_ID).build()); + + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BASE_PATH, input.basePath()); + assertEquals(INDEX_UUID, input.indexUUID()); + assertEquals(SHARD_ID, input.shardId()); + } + + public void testSnapshotShardPathInputFixedSubPath() { + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BlobPath.cleanPath().add("indices").add(INDEX_UUID).add(SHARD_ID), input.fixedSubPath()); + } + public void testSnapshotShardPathInputHashPath() { + RemoteStorePathStrategy.SnapshotShardPathInput input = RemoteStorePathStrategy.SnapshotShardPathInput.builder() + .basePath(BASE_PATH) + .indexUUID(INDEX_UUID) + .shardId(SHARD_ID) + .build(); + assertEquals(BlobPath.cleanPath().add(SHARD_ID).add(INDEX_UUID), input.hashPath()); } + } diff --git a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java 
b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java index 2b927b3b40115..3b719d287aa9b 100644 --- a/server/src/test/java/org/opensearch/repositories/IndexIdTests.java +++ b/server/src/test/java/org/opensearch/repositories/IndexIdTests.java @@ -32,6 +32,7 @@ package org.opensearch.repositories; +import org.opensearch.Version; import org.opensearch.common.UUIDs; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.xcontent.json.JsonXContent; @@ -39,6 +40,7 @@ import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; @@ -52,51 +54,84 @@ public void testEqualsAndHashCode() { // assert equals and hashcode String name = randomAlphaOfLength(8); String id = UUIDs.randomBase64UUID(); - IndexId indexId1 = new IndexId(name, id); - IndexId indexId2 = new IndexId(name, id); + int shardPathType = randomIntBetween(0, 2); + IndexId indexId1 = new IndexId(name, id, shardPathType); + IndexId indexId2 = new IndexId(name, id, shardPathType); assertEquals(indexId1, indexId2); assertEquals(indexId1.hashCode(), indexId2.hashCode()); // assert equals when using index name for id id = name; - indexId1 = new IndexId(name, id); - indexId2 = new IndexId(name, id); + indexId1 = new IndexId(name, id, shardPathType); + indexId2 = new IndexId(name, id, shardPathType); assertEquals(indexId1, indexId2); assertEquals(indexId1.hashCode(), indexId2.hashCode()); - // assert not equals when name or id differ - indexId2 = new IndexId(randomAlphaOfLength(8), id); + // assert not equals when name, id, or shardPathType differ + indexId2 = new IndexId(randomAlphaOfLength(8), id, shardPathType); assertNotEquals(indexId1, indexId2); assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); - indexId2 = new IndexId(name, UUIDs.randomBase64UUID()); + indexId2 = new IndexId(name, UUIDs.randomBase64UUID(), shardPathType); assertNotEquals(indexId1, indexId2); assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); + int newShardPathType = randomIntBetween(0, 2); + indexId2 = new IndexId(name, id, newShardPathType); + if (shardPathType == newShardPathType) { + assertEquals(indexId1, indexId2); + assertEquals(indexId1.hashCode(), indexId2.hashCode()); + } else { + assertNotEquals(indexId1, indexId2); + assertNotEquals(indexId1.hashCode(), indexId2.hashCode()); + } } public void testSerialization() throws IOException { - IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); + IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID(), randomIntBetween(0, 2)); BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(Version.CURRENT); indexId.writeTo(out); assertEquals(indexId, new IndexId(out.bytes().streamInput())); } public void testXContent() throws IOException { - IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); + String name = randomAlphaOfLength(8); + String id = UUIDs.randomBase64UUID(); + int shardPathType = randomIntBetween(0, 2); + IndexId indexId = new IndexId(name, id, shardPathType); XContentBuilder builder = JsonXContent.contentBuilder(); indexId.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - String name = 
null; - String id = null; + String parsedName = null; + String parsedId = null; + int parsedShardPathType = -1; while (parser.nextToken() != XContentParser.Token.END_OBJECT) { final String currentFieldName = parser.currentName(); parser.nextToken(); - if (currentFieldName.equals(IndexId.NAME)) { - name = parser.text(); - } else if (currentFieldName.equals(IndexId.ID)) { - id = parser.text(); + switch (currentFieldName) { + case IndexId.NAME: + parsedName = parser.text(); + break; + case IndexId.ID: + parsedId = parser.text(); + break; + case IndexId.SHARD_PATH_TYPE: + parsedShardPathType = parser.intValue(); + break; } } - assertNotNull(name); - assertNotNull(id); - assertEquals(indexId, new IndexId(name, id)); + parser.close(); + assertNotNull(parsedName); + assertNotNull(parsedId); + assertNotEquals(-1, parsedShardPathType); + assertEquals(name, parsedName); + assertEquals(id, parsedId); + assertEquals(shardPathType, parsedShardPathType); + } + + public void testDefaultShardPathType() { + String name = randomAlphaOfLength(8); + String id = UUIDs.randomBase64UUID(); + IndexId indexId = new IndexId(name, id); + assertEquals(IndexId.DEFAULT_SHARD_PATH_TYPE, indexId.getShardPathType()); + assertEquals(RemoteStoreEnums.PathType.FIXED.getCode(), IndexId.DEFAULT_SHARD_PATH_TYPE); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java index a5bfca6892013..e43335246deb3 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoryDataTests.java @@ -42,12 +42,14 @@ import org.opensearch.core.xcontent.XContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.index.remote.RemoteStoreEnums.PathType; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchTestCase; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -59,6 +61,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.FIXED; +import static org.opensearch.index.remote.RemoteStoreEnums.PathType.HASHED_PREFIX; import static org.opensearch.repositories.RepositoryData.EMPTY_REPO_GEN; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -70,7 +74,7 @@ public class RepositoryDataTests extends OpenSearchTestCase { public void testEqualsAndHashCode() { - RepositoryData repositoryData1 = generateRandomRepoData(); + RepositoryData repositoryData1 = generateRandomRepoData(FIXED.getCode()); RepositoryData repositoryData2 = repositoryData1.copy(); assertEquals(repositoryData1, repositoryData2); assertEquals(repositoryData1.hashCode(), repositoryData2.hashCode()); @@ -332,7 +336,7 @@ public void testIndexThatReferenceANullSnapshot() throws IOException { ); assertThat( e.getMessage(), - equalTo("Detected a corrupted repository, " + "index [docs/_id] references an unknown snapshot uuid [null]") + equalTo("Detected a corrupted repository, " + "index [docs/_id/0] references an unknown snapshot uuid [null]") ); } } @@ -403,11 +407,95 @@ public void testIndexMetaDataToRemoveAfterRemovingSnapshotWithSharing() { 
assertEquals(newRepoData.indexMetaDataToRemoveAfterRemovingSnapshots(Collections.singleton(otherSnapshotId)), removeFromOther); } + public void testResolveNewIndices() { + // Test case 1: All indices are new + List indicesToResolve = Arrays.asList("index1", "index2", "index3"); + Map inFlightIds = Collections.emptyMap(); + int pathType = randomIntBetween(0, 2); + List resolvedIndices = RepositoryData.EMPTY.resolveNewIndices(indicesToResolve, inFlightIds, pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + assertTrue(indicesToResolve.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(pathType, indexId.getShardPathType()); + } + + // Test case 2: Some indices are existing, some are new + RepositoryData repositoryData = generateRandomRepoData(); + Map existingIndices = repositoryData.getIndices(); + List existingIndexNames = new ArrayList<>(existingIndices.keySet()); + List newIndexNames = Arrays.asList("newIndex1", "newIndex2"); + indicesToResolve = new ArrayList<>(existingIndexNames); + indicesToResolve.addAll(newIndexNames); + pathType = randomIntBetween(0, 2); + resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, Collections.emptyMap(), pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), indexId); + } else { + assertTrue(newIndexNames.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(pathType, indexId.getShardPathType()); + } + } + + // Test case 3: Some indices are in-flight + Map inFlightIndexIds = new HashMap<>(); + for (String indexName : newIndexNames) { + inFlightIndexIds.put(indexName, new IndexId(indexName, UUIDs.randomBase64UUID(), pathType)); + } + resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, inFlightIndexIds, pathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), indexId); + } else if (newIndexNames.contains(indexId.getName())) { + assertEquals(inFlightIndexIds.get(indexId.getName()), indexId); + } else { + fail("Unexpected index: " + indexId.getName()); + } + } + } + + public void testResolveNewIndicesWithDifferentPathType() { + // Generate repository data with a fixed path type + int existingPathType = PathType.FIXED.getCode(); + RepositoryData repositoryData = generateRandomRepoData(existingPathType); + Map existingIndices = repositoryData.getIndices(); + + // Create a list of existing and new index names + List existingIndexNames = new ArrayList<>(existingIndices.keySet()); + List newIndexNames = Arrays.asList("newIndex1", "newIndex2"); + List indicesToResolve = new ArrayList<>(existingIndexNames); + indicesToResolve.addAll(newIndexNames); + + // Use a different path type for new indices + int newPathType = HASHED_PREFIX.getCode(); + + List resolvedIndices = repositoryData.resolveNewIndices(indicesToResolve, Collections.emptyMap(), newPathType); + assertEquals(indicesToResolve.size(), resolvedIndices.size()); + for (IndexId indexId : resolvedIndices) { + if (existingIndexNames.contains(indexId.getName())) { + assertEquals(existingIndices.get(indexId.getName()), indexId); + assertEquals(existingPathType, indexId.getShardPathType()); + } else { + 
assertTrue(newIndexNames.contains(indexId.getName())); + assertNotNull(indexId.getId()); + assertEquals(newPathType, indexId.getShardPathType()); + } + } + } + public static RepositoryData generateRandomRepoData() { + return generateRandomRepoData(randomFrom(PathType.values()).getCode()); + } + + public static RepositoryData generateRandomRepoData(int pathType) { final int numIndices = randomIntBetween(1, 30); final List indices = new ArrayList<>(numIndices); for (int i = 0; i < numIndices; i++) { - indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())); + indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID(), pathType)); } final int numSnapshots = randomIntBetween(1, 30); RepositoryData repositoryData = RepositoryData.EMPTY; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 29ffb94ce8bf4..958a499ada167 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -9,7 +9,6 @@ package org.opensearch.repositories.blobstore; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; @@ -99,25 +98,15 @@ protected void assertSnapshotOrGenericThread() { } protected void createRepository(Client client, String repoName) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repoName, REPO_TYPE, settings); } protected void createRepository(Client client, String repoName, Settings repoSettings) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings(repoSettings) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settingsBuilder = Settings.builder().put(repoSettings); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repoName, REPO_TYPE, settingsBuilder); } protected void updateRepository(Client client, String repoName, Settings repoSettings) { diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 6b550cbc60b29..fcc0c5198894f 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -34,19 +34,28 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.GroupedActionListener; import 
org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.DeleteResult; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.unit.ByteSizeUnit; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.RepositoryPlugin; @@ -55,9 +64,12 @@ import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryData; import org.opensearch.repositories.RepositoryException; +import org.opensearch.repositories.RepositoryStats; import org.opensearch.repositories.ShardGenerations; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; +import org.opensearch.snapshots.SnapshotShardPaths; +import org.opensearch.snapshots.SnapshotShardPaths.ShardInfo; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.OpenSearchIntegTestCase; @@ -65,15 +77,30 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; import java.util.stream.Collectors; import static org.opensearch.repositories.RepositoryDataTests.generateRandomRepoData; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; /** * Tests for the {@link BlobStoreRepository} and its subclasses. */
@@ -114,13 +141,8 @@ public void testRetrieveSnapshots() throws Exception { final String repositoryName = "test-repo"; logger.info("--> creating repository"); - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings(Settings.builder().put(node().settings()).put("location", location)) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder().put(node().settings()).put("location", location); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; @@ -239,20 +261,13 @@ public void testBadChunksize() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", location) + .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES); expectThrows( RepositoryException.class, - () -> client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder() - .put(node().settings()) - .put("location", location) - .put("chunk_size", randomLongBetween(-10, 0), ByteSizeUnit.BYTES) - ) - .get() + () -> OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings) ); } @@ -260,18 +275,11 @@ public void testPrefixModeVerification() throws Exception { final Client client = client(); final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder() - .put(node().settings()) - .put("location", location) - .put(BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(), true) - ) - .get(); - assertTrue(putRepositoryResponse.isAcknowledged()); + Settings.Builder settings = Settings.builder() + .put(node().settings()) + .put("location", location) + .put(BlobStoreRepository.PREFIX_MODE_VERIFICATION_SETTING.getKey(), true); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(repositoryName); @@ -305,13 +313,8 @@ private BlobStoreRepository setupRepo() { final Path location = OpenSearchIntegTestCase.randomRepoPath(node().settings()); final String repositoryName = "test-repo"; - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repositoryName) - .setType(REPO_TYPE) - .setSettings(Settings.builder().put(node().settings()).put("location", location)) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + Settings.Builder settings = Settings.builder().put(node().settings()).put("location", location); + OpenSearchIntegTestCase.putRepository(client.admin().cluster(), repositoryName, REPO_TYPE, settings); final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); final BlobStoreRepository repository = (BlobStoreRepository) 
repositoriesService.repository(repositoryName); @@ -371,4 +374,258 @@ public void testRemoteStoreShardCleanupTask() { remoteStoreShardCleanupTask.run(); assertFalse(executed1.get()); } + + public void testParseShardPath() { + RepositoryData repoData = generateRandomRepoData(); + IndexId indexId = repoData.getIndices().values().iterator().next(); + int shardCount = repoData.shardGenerations().getGens(indexId).size(); + + String shardPath = String.join( + SnapshotShardPaths.DELIMITER, + indexId.getId(), + indexId.getName(), + String.valueOf(shardCount), + String.valueOf(indexId.getShardPathType()), + "1" + ); + ShardInfo shardInfo = SnapshotShardPaths.parseShardPath(shardPath); + + assertEquals(shardInfo.getIndexId(), indexId); + assertEquals(shardInfo.getShardCount(), shardCount); + } + + public void testWriteAndReadShardPaths() throws Exception { + BlobStoreRepository repository = setupRepo(); + RepositoryData repoData = generateRandomRepoData(); + SnapshotId snapshotId = repoData.getSnapshotIds().iterator().next(); + + Set<String> writtenShardPaths = new HashSet<>(); + for (IndexId indexId : repoData.getIndices().values()) { + if (indexId.getShardPathType() != IndexId.DEFAULT_SHARD_PATH_TYPE) { + String shardPathBlobName = repository.writeIndexShardPaths(indexId, snapshotId, indexId.getShardPathType()); + writtenShardPaths.add(shardPathBlobName); + } + } + + // Read shard paths and verify + Map<String, BlobMetadata> shardPathBlobs = repository.snapshotShardPathBlobContainer().listBlobs(); + + // Create sets for comparison + Set<String> expectedPaths = new HashSet<>(writtenShardPaths); + Set<String> actualPaths = new HashSet<>(shardPathBlobs.keySet()); + + // Remove known extra files - "extra0" file is added by the ExtrasFS, which is part of Lucene's test framework + actualPaths.remove("extra0"); + + // Check if all expected paths are present in the actual paths + assertTrue("All expected paths should be present", actualPaths.containsAll(expectedPaths)); + + // Check if there are any unexpected additional paths + Set<String> unexpectedPaths = new HashSet<>(actualPaths); + unexpectedPaths.removeAll(expectedPaths); + if (!unexpectedPaths.isEmpty()) { + logger.warn("Unexpected additional paths found: " + unexpectedPaths); + } + + assertEquals("Expected and actual paths should match after removing known extra files", expectedPaths, actualPaths); + + for (String shardPathBlobName : expectedPaths) { + SnapshotShardPaths.ShardInfo shardInfo = SnapshotShardPaths.parseShardPath(shardPathBlobName); + IndexId indexId = repoData.getIndices().get(shardInfo.getIndexId().getName()); + assertNotNull("IndexId should not be null", indexId); + assertEquals("Index ID should match", shardInfo.getIndexId().getId(), indexId.getId()); + assertEquals("Shard path type should match", shardInfo.getIndexId().getShardPathType(), indexId.getShardPathType()); + String[] parts = shardPathBlobName.split("\\" + SnapshotShardPaths.DELIMITER); + assertEquals( + "Path hash algorithm should be FNV_1A_COMPOSITE_1", + RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1, + RemoteStoreEnums.PathHashAlgorithm.fromCode(Integer.parseInt(parts[4])) + ); + } + } + + public void testCleanupStaleIndices() throws Exception { + // Mock the BlobStoreRepository + BlobStoreRepository repository = mock(BlobStoreRepository.class); + + // Mock BlobContainer for stale index + BlobContainer staleIndexContainer = mock(BlobContainer.class); + when(staleIndexContainer.delete()).thenReturn(new DeleteResult(1, 100L)); + + // Mock BlobContainer for current index + BlobContainer currentIndexContainer =
mock(BlobContainer.class); + + Map<String, BlobContainer> foundIndices = new HashMap<>(); + foundIndices.put("stale-index", staleIndexContainer); + foundIndices.put("current-index", currentIndexContainer); + + Set<String> survivingIndexIds = new HashSet<>(); + survivingIndexIds.add("current-index"); + + // Create a mock RemoteStoreLockManagerFactory + RemoteStoreLockManagerFactory mockRemoteStoreLockManagerFactory = mock(RemoteStoreLockManagerFactory.class); + RemoteStoreLockManager mockLockManager = mock(RemoteStoreLockManager.class); + when(mockRemoteStoreLockManagerFactory.newLockManager(anyString(), anyString(), anyString(), any())).thenReturn(mockLockManager); + + // Create mock snapshot shard paths + Map<String, BlobMetadata> mockSnapshotShardPaths = new HashMap<>(); + String validShardPath = "stale-index-id#stale-index#1#0#1"; + mockSnapshotShardPaths.put(validShardPath, mock(BlobMetadata.class)); + + // Mock snapshotShardPathBlobContainer + BlobContainer mockSnapshotShardPathBlobContainer = mock(BlobContainer.class); + when(mockSnapshotShardPathBlobContainer.delete()).thenReturn(new DeleteResult(1, 50L)); + when(repository.snapshotShardPathBlobContainer()).thenReturn(mockSnapshotShardPathBlobContainer); + + // Mock the cleanupStaleIndices method to call our test implementation + doAnswer(invocation -> { + Map<String, BlobContainer> indices = invocation.getArgument(0); + Set<String> surviving = invocation.getArgument(1); + GroupedActionListener<DeleteResult> listener = invocation.getArgument(3); + + // Simulate the cleanup process + DeleteResult result = DeleteResult.ZERO; + for (Map.Entry<String, BlobContainer> entry : indices.entrySet()) { + if (!surviving.contains(entry.getKey())) { + result = result.add(entry.getValue().delete()); + } + } + result = result.add(mockSnapshotShardPathBlobContainer.delete()); + + listener.onResponse(result); + return null; + }).when(repository).cleanupStaleIndices(any(), any(), any(), any(), any(), anyMap()); + + AtomicReference<Collection<DeleteResult>> resultReference = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + + GroupedActionListener<DeleteResult> listener = new GroupedActionListener<>(ActionListener.wrap(deleteResults -> { + resultReference.set(deleteResults); + latch.countDown(); + }, e -> { + logger.error("Error in cleanupStaleIndices", e); + latch.countDown(); + }), 1); + + // Call the method we're testing + repository.cleanupStaleIndices( + foundIndices, + survivingIndexIds, + mockRemoteStoreLockManagerFactory, + listener, + mockSnapshotShardPaths, + Collections.emptyMap() + ); + + assertTrue("Cleanup did not complete within the expected time", latch.await(30, TimeUnit.SECONDS)); + + Collection<DeleteResult> results = resultReference.get(); + assertNotNull("DeleteResult collection should not be null", results); + assertFalse("DeleteResult collection should not be empty", results.isEmpty()); + + DeleteResult combinedResult = results.stream().reduce(DeleteResult.ZERO, DeleteResult::add); + + assertTrue("Bytes deleted should be greater than 0", combinedResult.bytesDeleted() > 0); + assertTrue("Blobs deleted should be greater than 0", combinedResult.blobsDeleted() > 0); + + // Verify that the stale index was processed for deletion + verify(staleIndexContainer, times(1)).delete(); + + // Verify that the current index was not processed for deletion + verify(currentIndexContainer, never()).delete(); + + // Verify that snapshot shard paths were considered in the cleanup process + verify(mockSnapshotShardPathBlobContainer, times(1)).delete(); + + // Verify the total number of bytes and blobs deleted + assertEquals("Total bytes deleted should be 150", 150L, combinedResult.bytesDeleted());
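+ // The expected totals follow from the mocked results above: DeleteResult(1, 100L) for the stale index container plus DeleteResult(1, 50L) for the snapshot shard path container.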
+ assertEquals("Total blobs deleted should be 2", 2, combinedResult.blobsDeleted()); + } + + public void testGetMetadata() { + BlobStoreRepository repository = setupRepo(); + RepositoryMetadata metadata = repository.getMetadata(); + assertNotNull(metadata); + assertEquals(metadata.name(), "test-repo"); + assertEquals(metadata.type(), REPO_TYPE); + repository.close(); + } + + public void testGetNamedXContentRegistry() { + BlobStoreRepository repository = setupRepo(); + NamedXContentRegistry registry = repository.getNamedXContentRegistry(); + assertNotNull(registry); + repository.close(); + } + + public void testGetCompressor() { + BlobStoreRepository repository = setupRepo(); + Compressor compressor = repository.getCompressor(); + assertNotNull(compressor); + repository.close(); + } + + public void testGetStats() { + BlobStoreRepository repository = setupRepo(); + RepositoryStats stats = repository.stats(); + assertNotNull(stats); + repository.close(); + } + + public void testGetSnapshotThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getSnapshotThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRestoreThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRestoreThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRemoteUploadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRemoteUploadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetLowPriorityRemoteUploadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getLowPriorityRemoteUploadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testGetRemoteDownloadThrottleTimeInNanos() { + BlobStoreRepository repository = setupRepo(); + long throttleTime = repository.getRemoteDownloadThrottleTimeInNanos(); + assertTrue(throttleTime >= 0); + repository.close(); + } + + public void testIsReadOnly() { + BlobStoreRepository repository = setupRepo(); + assertFalse(repository.isReadOnly()); + repository.close(); + } + + public void testIsSystemRepository() { + BlobStoreRepository repository = setupRepo(); + assertFalse(repository.isSystemRepository()); + repository.close(); + } + + public void testGetRestrictedSystemRepositorySettings() { + BlobStoreRepository repository = setupRepo(); + List> settings = repository.getRestrictedSystemRepositorySettings(); + assertNotNull(settings); + assertTrue(settings.contains(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING)); + assertTrue(settings.contains(BlobStoreRepository.READONLY_SETTING)); + assertTrue(settings.contains(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY)); + repository.close(); + } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index e38fb788e65b1..9571bea53ef53 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -229,6 +229,7 @@ import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.telemetry.metrics.noop.NoopMetricsRegistry; import org.opensearch.telemetry.tracing.noop.NoopTracer; +import 
org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.threadpool.ThreadPool; @@ -1501,12 +1502,8 @@ private RepositoryData getRepositoryData(Repository repository) { private StepListener<CreateIndexResponse> createRepoAndIndex(String repoName, String index, int shards) { final StepListener<AcknowledgedResponse> createRepositoryListener = new StepListener<>(); - client().admin() - .cluster() - .preparePutRepository(repoName) - .setType(FsRepository.TYPE) - .setSettings(Settings.builder().put("location", randomAlphaOfLength(10))) - .execute(createRepositoryListener); + Settings.Builder settings = Settings.builder().put("location", randomAlphaOfLength(10)); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), repoName, FsRepository.TYPE, settings, createRepositoryListener); final StepListener<CreateIndexResponse> createIndexResponseStepListener = new StepListener<>(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java new file mode 100644 index 0000000000000..15eb70913eb88 --- /dev/null +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotShardPathsTests.java @@ -0,0 +1,144 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.snapshots; + +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.index.remote.RemoteStoreEnums; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class SnapshotShardPathsTests extends OpenSearchTestCase { + + public void testToXContent() throws IOException { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + SnapshotShardPaths snapshotShardPaths = new SnapshotShardPaths( + paths, + indexId, + indexName, + numberOfShards, + shardPathType, + shardPathHashAlgorithm + ); + + BytesReference bytes = XContentHelper.toXContent(snapshotShardPaths, XContentType.JSON, false); + String expectedJson = + "{\"indexId\":\"index-id\",\"indexName\":\"index-name\",\"number_of_shards\":5,\"shard_path_type\":1,\"shard_path_hash_algorithm\":0,\"paths\":[\"/path/to/shard/1\",\"/path/to/shard/2\",\"/path/to/shard/3\"]}"; + assertEquals(expectedJson, bytes.utf8ToString()); + } + + public void testMissingPaths() { + List<String> paths = Collections.emptyList(); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.FIXED; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); +
assertTrue(exception.getMessage().contains("paths must not be empty")); + } + + public void testMissingIndexId() { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = ""; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("indexId must not be empty")); + } + + public void testMissingIndexName() { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = ""; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("indexName must not be empty")); + } + + public void testMissingNumberOfShards() { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 0; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("numberOfShards must be > 0")); + } + + public void testMissingShardPathType() { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = null; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = RemoteStoreEnums.PathHashAlgorithm.FNV_1A_BASE64; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertTrue(exception.getMessage().contains("shardPathType must not be null")); + } + + public void testMissingShardPathHashAlgorithm() { + List<String> paths = Arrays.asList("/path/to/shard/1", "/path/to/shard/2", "/path/to/shard/3"); + String indexId = "index-id"; + String indexName = "index-name"; + int numberOfShards = 5; + RemoteStoreEnums.PathType shardPathType = RemoteStoreEnums.PathType.HASHED_PREFIX; + RemoteStoreEnums.PathHashAlgorithm shardPathHashAlgorithm = null; + + AssertionError exception = expectThrows( + AssertionError.class, + () -> new SnapshotShardPaths(paths, indexId, indexName, numberOfShards, shardPathType, shardPathHashAlgorithm) + ); + assertEquals("shardPathHashAlgorithm must not be null", exception.getMessage()); + } + + public void testFromXContent() { + UnsupportedOperationException exception = expectThrows( +
UnsupportedOperationException.class, + () -> SnapshotShardPaths.fromXContent(null) + ); + assertEquals("SnapshotShardPaths.fromXContent() is not supported", exception.getMessage()); + } +} diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 01ca3aed54e6f..6001ce369b228 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -142,7 +142,7 @@ public static void assertConsistency(BlobStoreRepository repository, Executor ex } assertIndexUUIDs(repository, repositoryData); assertSnapshotUUIDs(repository, repositoryData); - assertShardIndexGenerations(blobContainer, repositoryData); + assertShardIndexGenerations(repository, repositoryData); return null; } catch (AssertionError e) { return e; @@ -166,14 +166,12 @@ private static void assertIndexGenerations(BlobContainer repoRoot, long latestGe assertTrue(indexGenerations.length <= 2); } - private static void assertShardIndexGenerations(BlobContainer repoRoot, RepositoryData repositoryData) throws IOException { + private static void assertShardIndexGenerations(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException { final ShardGenerations shardGenerations = repositoryData.shardGenerations(); - final BlobContainer indicesContainer = repoRoot.children().get("indices"); for (IndexId index : shardGenerations.indices()) { final List<String> gens = shardGenerations.getGens(index); if (gens.isEmpty() == false) { - final BlobContainer indexContainer = indicesContainer.children().get(index.getId()); - final Map<String, BlobContainer> shardContainers = indexContainer.children(); + final Map<String, BlobContainer> shardContainers = getShardContainers(index, repository, repositoryData); for (int i = 0; i < gens.size(); i++) { final String generation = gens.get(i); assertThat(generation, not(ShardGenerations.DELETED_SHARD_GEN)); @@ -190,6 +188,20 @@ private static void assertShardIndexGenerations(BlobContainer repoRoot, Reposito } } + private static Map<String, BlobContainer> getShardContainers( + IndexId indexId, + BlobStoreRepository repository, + RepositoryData repositoryData + ) { + final Map<String, BlobContainer> shardContainers = new HashMap<>(); + int shardCount = repositoryData.shardGenerations().getGens(indexId).size(); + for (int i = 0; i < shardCount; i++) { + final BlobContainer shardContainer = repository.shardContainer(indexId, i); + shardContainers.put(String.valueOf(i), shardContainer); + } + return shardContainers; + } + private static void assertIndexUUIDs(BlobStoreRepository repository, RepositoryData repositoryData) throws IOException { final List<String> expectedIndexUUIDs = repositoryData.getIndices() .values() diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java index 507a100c94e0d..e812589d64aa4 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchBlobStoreRepositoryIntegTestCase.java @@ -110,9 +110,7 @@ protected final String createRepository(final String name, final Settings settin final boolean verify = randomBoolean(); logger.debug("--> creating repository [name: {}, verify: {},
settings: {}]", name, verify, settings); - assertAcked( - client().admin().cluster().preparePutRepository(name).setType(repositoryType()).setVerify(verify).setSettings(settings) - ); + OpenSearchIntegTestCase.putRepository(client().admin().cluster(), name, repositoryType(), verify, Settings.builder().put(settings)); internalCluster().getDataOrClusterManagerNodeInstances(RepositoriesService.class).forEach(repositories -> { assertThat(repositories.repository(name), notNullValue()); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index c0265393ca7bb..16d439f706af3 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -104,7 +104,6 @@ import static org.opensearch.index.remote.RemoteStoreEnums.DataCategory.SEGMENTS; import static org.opensearch.index.remote.RemoteStoreEnums.DataType.LOCK_FILES; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -382,16 +381,6 @@ public void unblockNode(final String repository, final String node) { ((MockRepository) internalCluster().getInstance(RepositoriesService.class, node).repository(repository)).unblock(); } - protected void createRepository(String repoName, String type, Settings.Builder settings) { - logger.info("--> creating repository [{}] [{}]", repoName, type); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); - } - - protected void updateRepository(String repoName, String type, Settings.Builder settings) { - logger.info("--> updating repository [{}] [{}]", repoName, type); - assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); - } - protected void createRepository(String repoName, String type, Path location) { createRepository(repoName, type, Settings.builder().put("location", location)); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index cb7854d326db1..46954135bc619 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.node.hotthreads.NodeHotThreads; import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.action.admin.cluster.tasks.PendingClusterTasksResponse; @@ -70,6 +71,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.ClusterAdminClient; @@ -99,6 +101,7 @@ import 
org.opensearch.cluster.service.applicationtemplates.TestSystemTemplatesRepositoryPlugin; import org.opensearch.common.Nullable; import org.opensearch.common.Priority; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.network.NetworkModule; @@ -143,7 +146,9 @@ import org.opensearch.index.engine.Segment; import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; +import org.opensearch.index.remote.RemoteStoreEnums; import org.opensearch.index.remote.RemoteStoreEnums.PathType; +import org.opensearch.index.remote.RemoteStorePathStrategy; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -159,6 +164,7 @@ import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.NetworkPlugin; import org.opensearch.plugins.Plugin; +import org.opensearch.repositories.IndexId; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.repositories.fs.ReloadableFsRepository; @@ -225,6 +231,7 @@ import static org.opensearch.index.IndexSettings.INDEX_DOC_ID_FUZZY_SET_FALSE_POSITIVE_PROBABILITY_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.remote.RemoteStoreEnums.PathHashAlgorithm.FNV_1A_COMPOSITE_1; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; @@ -2582,6 +2589,85 @@ protected long getLatestSegmentInfoVersion(IndexShard shard) { } } + protected void createRepository(String repoName, String type, Settings.Builder settings, String timeout) { + logger.info("--> creating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, timeout, settings); + } + + protected void createRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> creating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, null, settings); + } + + protected void updateRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> updating repository [{}] [{}]", repoName, type); + putRepository(clusterAdmin(), repoName, type, null, settings); + } + + public static void putRepository(ClusterAdminClient adminClient, String repoName, String type, Settings.Builder settings) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, null, false)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + String timeout, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, timeout, false)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, verify, settings, null, false)); 
+ } + + public static void putRepositoryWithNoSettingOverrides( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings + ) { + assertAcked(putRepositoryRequestBuilder(adminClient, repoName, type, verify, settings, null, true)); + } + + public static void putRepository( + ClusterAdminClient adminClient, + String repoName, + String type, + Settings.Builder settings, + ActionListener<AcknowledgedResponse> listener + ) { + putRepositoryRequestBuilder(adminClient, repoName, type, true, settings, null, false).execute(listener); + } + + public static PutRepositoryRequestBuilder putRepositoryRequestBuilder( + ClusterAdminClient adminClient, + String repoName, + String type, + boolean verify, + Settings.Builder settings, + String timeout, + boolean finalSettings + ) { + PutRepositoryRequestBuilder builder = adminClient.preparePutRepository(repoName).setType(type).setVerify(verify); + if (timeout != null) { + builder.setTimeout(timeout); + } + if (finalSettings == false) { + settings.put(BlobStoreRepository.SHARD_PATH_TYPE.getKey(), randomFrom(PathType.values())); + } + builder.setSettings(settings); + return builder; + } + public static Settings remoteStoreClusterSettings(String name, Path path) { return remoteStoreClusterSettings(name, path, name, path); } @@ -2823,4 +2909,15 @@ private static Settings buildRemoteStoreNodeAttributes( return settings.build(); } + public static String resolvePath(IndexId indexId, String shardId) { + PathType pathType = PathType.fromCode(indexId.getShardPathType()); + RemoteStorePathStrategy.SnapshotShardPathInput shardPathInput = new RemoteStorePathStrategy.SnapshotShardPathInput.Builder() + .basePath(BlobPath.cleanPath()) + .indexUUID(indexId.getId()) + .shardId(shardId) + .build(); + RemoteStoreEnums.PathHashAlgorithm pathHashAlgorithm = pathType != PathType.FIXED ? FNV_1A_COMPOSITE_1 : null; + BlobPath blobPath = pathType.path(shardPathInput, pathHashAlgorithm); + return blobPath.buildAsString(); + } }
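As a companion to the resolvePath helper added above, a rough usage sketch may help reviewers; this is not part of the change, the index name, UUID, and shard id below are illustrative placeholders, and the exact segments of the resolved path are whatever PathType.path produces:

// Sketch only: placeholder ids, exercising the resolvePath helper from a test
// in a subclass of OpenSearchIntegTestCase.
IndexId fixedId = new IndexId("test-idx", "testIndexUUID", RemoteStoreEnums.PathType.FIXED.getCode());
// FIXED keeps the legacy repository-relative layout: no hashed component, just the
// fixed sub-path (index UUID and shard id) appended to the clean base path.
String fixedPath = OpenSearchIntegTestCase.resolvePath(fixedId, "0");

IndexId hashedId = new IndexId("test-idx", "testIndexUUID", RemoteStoreEnums.PathType.HASHED_PREFIX.getCode());
// HASHED_PREFIX resolves the same suffix behind a prefix derived from the
// FNV_1A_COMPOSITE_1 hash of the path input, fanning snapshot shard blobs
// out across the blob store keyspace.
String hashedPath = OpenSearchIntegTestCase.resolvePath(hashedId, "0");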