diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml
index ba1dc3dc3b5e6..f895dfc2c1f4d 100644
--- a/.github/workflows/gradle-check.yml
+++ b/.github/workflows/gradle-check.yml
@@ -22,34 +22,34 @@ jobs:
     timeout-minutes: 130
     steps:
       - name: Checkout OpenSearch repo
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          ref: ${{ github.event.pull_request.head.sha }}

      - name: Setup environment variables (PR)
        if: github.event_name == 'pull_request_target'
        run: |
-         echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
-         echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
-         echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
-         echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
-         echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
+          echo "pr_from_sha=$(jq --raw-output .pull_request.head.sha $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
+          echo "pr_from_clone_url=$(jq --raw-output .pull_request.head.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
+          echo "pr_to_clone_url=$(jq --raw-output .pull_request.base.repo.clone_url $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
+          echo "pr_title=$(jq --raw-output .pull_request.title $GITHUB_EVENT_PATH)" >> $GITHUB_ENV
+          echo "pr_number=$(jq --raw-output .pull_request.number $GITHUB_EVENT_PATH)" >> $GITHUB_ENV

      - name: Setup environment variables (Push)
        if: github.event_name == 'push'
        run: |
-         repo_url="https://github.com/opensearch-project/OpenSearch"
-         ref_id=$(git rev-parse HEAD)
-         branch_name=$(git rev-parse --abbrev-ref HEAD)
-         echo "branch_name=$branch_name" >> $GITHUB_ENV
-         echo "pr_from_sha=$ref_id" >> $GITHUB_ENV
-         echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV
-         echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV
-         echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV
-         echo "pr_number=Null" >> $GITHUB_ENV
+          repo_url="https://github.com/opensearch-project/OpenSearch"
+          ref_id=$(git rev-parse HEAD)
+          branch_name=$(git rev-parse --abbrev-ref HEAD)
+          echo "branch_name=$branch_name" >> $GITHUB_ENV
+          echo "pr_from_sha=$ref_id" >> $GITHUB_ENV
+          echo "pr_from_clone_url=$repo_url" >> $GITHUB_ENV
+          echo "pr_to_clone_url=$repo_url" >> $GITHUB_ENV
+          echo "pr_title=Push trigger $branch_name $ref_id $repo_url" >> $GITHUB_ENV
+          echo "pr_number=Null" >> $GITHUB_ENV

      - name: Checkout opensearch-build repo
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          repository: opensearch-project/opensearch-build
          ref: main
@@ -57,17 +57,17 @@ jobs:

      - name: Trigger jenkins workflow to run gradle check
        run: |
-         set -e
-         set -o pipefail
-         bash opensearch-build/scripts/gradle/gradle-check.sh ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} | tee -a gradle-check.log
+          set -e
+          set -o pipefail
+          bash opensearch-build/scripts/gradle/gradle-check.sh ${{ secrets.JENKINS_GRADLE_CHECK_GENERIC_WEBHOOK_TOKEN }} | tee -a gradle-check.log

      - name: Setup Result Status
        if: always()
        run: |
-         WORKFLOW_URL=`cat gradle-check.log | grep 'WORKFLOW_URL' | awk '{print $2}'`
-         RESULT=`cat gradle-check.log | grep 'Result:' | awk '{print $2}'`
-         echo "workflow_url=$WORKFLOW_URL" >> $GITHUB_ENV
-         echo "result=$RESULT" >> $GITHUB_ENV
+          WORKFLOW_URL=`cat gradle-check.log | grep 'WORKFLOW_URL' | awk '{print $2}'`
+          RESULT=`cat gradle-check.log | grep 'Result:' | awk '{print $2}'`
+          echo "workflow_url=$WORKFLOW_URL" >> $GITHUB_ENV
+          echo "result=$RESULT" >> $GITHUB_ENV

      - name: Upload Coverage Report
        if: success()
@@ -81,25 +81,25 @@ jobs:
        with:
          issue-number: ${{ env.pr_number }}
          body: |
-           ### Gradle Check (Jenkins) Run Completed with:
-           * **RESULT:** ${{ env.result }} :white_check_mark:
-           * **URL:** ${{ env.workflow_url }}
-           * **CommitID:** ${{ env.pr_from_sha }}
+            ### Gradle Check (Jenkins) Run Completed with:
+            * **RESULT:** ${{ env.result }} :white_check_mark:
+            * **URL:** ${{ env.workflow_url }}
+            * **CommitID:** ${{ env.pr_from_sha }}

      - name: Extract Test Failure
        if: ${{ github.event_name == 'pull_request_target' && env.result != 'SUCCESS' }}
        run: |
-         TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10`
-         if [[ "$TEST_FAILURES" != "" ]]
-         then
-           echo "test_failures<<EOF" >> $GITHUB_ENV
-           echo "" >> $GITHUB_ENV
-           echo "* **TEST FAILURES:**" >> $GITHUB_ENV
-           echo '```' >> $GITHUB_ENV
-           echo "$TEST_FAILURES" >> $GITHUB_ENV
-           echo '```' >> $GITHUB_ENV
-           echo "EOF" >> $GITHUB_ENV
-         fi
+          TEST_FAILURES=`curl -s "${{ env.workflow_url }}/testReport/api/json?tree=suites\[cases\[status,className,name\]\]" | jq -r '.. | objects | select(.status=="FAILED",.status=="REGRESSION") | (.className + "." + .name)' | uniq -c | sort -n -r | head -n 10`
+          if [[ "$TEST_FAILURES" != "" ]]
+          then
+            echo "test_failures<<EOF" >> $GITHUB_ENV
+            echo "" >> $GITHUB_ENV
+            echo "* **TEST FAILURES:**" >> $GITHUB_ENV
+            echo '```' >> $GITHUB_ENV
+            echo "$TEST_FAILURES" >> $GITHUB_ENV
+            echo '```' >> $GITHUB_ENV
+            echo "EOF" >> $GITHUB_ENV
+          fi

      - name: Create Comment Flaky
        if: ${{ github.event_name == 'pull_request_target' && success() && env.result != 'SUCCESS' }}
@@ -119,12 +119,12 @@ jobs:
        with:
          issue-number: ${{ env.pr_number }}
          body: |
-           ### Gradle Check (Jenkins) Run Completed with:
-           * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }}
-           * **URL:** ${{ env.workflow_url }}
-           * **CommitID:** ${{ env.pr_from_sha }}
-           Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green.
-           Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?
+            ### Gradle Check (Jenkins) Run Completed with:
+            * **RESULT:** ${{ env.result }} :x: ${{ env.test_failures }}
+            * **URL:** ${{ env.workflow_url }}
+            * **CommitID:** ${{ env.pr_from_sha }}
+            Please examine the workflow log, locate, and copy-paste the failure(s) below, then iterate to green.
+            Is the failure [a flaky test](https://github.com/opensearch-project/OpenSearch/blob/main/DEVELOPER_GUIDE.md#flaky-tests) unrelated to your change?

      - name: Create Issue On Push Failure
        if: ${{ github.event_name == 'push' && failure() }}
diff --git a/.github/workflows/precommit.yml b/.github/workflows/precommit.yml
index e264d65cdf191..8bbba657737c8 100644
--- a/.github/workflows/precommit.yml
+++ b/.github/workflows/precommit.yml
@@ -1,12 +1,12 @@
 name: Gradle Precommit
 on: [pull_request]
-
+
 jobs:
   precommit:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [windows-latest, macos-latest]    # precommit on ubuntu-latest is run as a part of the gradle-check workflow
     steps:
       - uses: actions/checkout@v2
       - name: Set up JDK 11
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bc36d6a92c3ab..55ccd366382c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,12 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `io.opencensus:opencensus-api` from 0.18.0 to 0.31.1 ([#7291](https://github.com/opensearch-project/OpenSearch/pull/7291))
 - OpenJDK Update (April 2023 Patch releases) ([#7344](https://github.com/opensearch-project/OpenSearch/pull/7344)
 - Bump `com.google.http-client:google-http-client:1.43.2` from 1.42.0 to 1.43.2 ([7928](https://github.com/opensearch-project/OpenSearch/pull/7928)))
+- Add Opentelemetry dependencies ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543))
+- Bump `org.bouncycastle:bcprov-jdk15on` to `org.bouncycastle:bcprov-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
+- Bump `org.bouncycastle:bcmail-jdk15on` to `org.bouncycastle:bcmail-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
+- Bump `org.bouncycastle:bcpkix-jdk15on` to `org.bouncycastle:bcpkix-jdk15to18` version 1.75 ([8247](https://github.com/opensearch-project/OpenSearch/pull/8247))
+
+
 ### Changed
 - [CCR] Add getHistoryOperationsFromTranslog method to fetch the history snapshot from translogs ([#3948](https://github.com/opensearch-project/OpenSearch/pull/3948))
@@ -80,9 +86,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Implement concurrent aggregations support without profile option ([#7514](https://github.com/opensearch-project/OpenSearch/pull/7514))
 - Add dynamic index and cluster setting for concurrent segment search ([#7956](https://github.com/opensearch-project/OpenSearch/pull/7956))
 - Add descending order search optimization through reverse segment read. ([#7967](https://github.com/opensearch-project/OpenSearch/pull/7967))
+- [Search pipelines] Added search pipelines output to node stats ([#8053](https://github.com/opensearch-project/OpenSearch/pull/8053))
 - Update components of segrep backpressure to support remote store. ([#8020](https://github.com/opensearch-project/OpenSearch/pull/8020))
 - Make remote cluster connection setup in async ([#8038](https://github.com/opensearch-project/OpenSearch/pull/8038))
 - Add API to initialize extensions ([#8029]()https://github.com/opensearch-project/OpenSearch/pull/8029)
+- Add distributed tracing framework ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543))

 ### Dependencies
 - Bump `com.azure:azure-storage-common` from 12.21.0 to 12.21.1 (#7566, #7814)
@@ -114,6 +122,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Bump `io.projectreactor.netty:reactor-netty-http` from 1.1.7 to 1.1.8 (#8256)
 - [Upgrade] Lucene 9.7.0 release (#8272)
 - Bump `org.jboss.resteasy:resteasy-jackson2-provider` from 3.0.26.Final to 6.2.4.Final in /qa/wildfly ([#8209](https://github.com/opensearch-project/OpenSearch/pull/8209))
+- Bump `com.google.api-client:google-api-client` from 1.34.0 to 2.2.0 ([#8276](https://github.com/opensearch-project/OpenSearch/pull/8276))

 ### Changed
 - Replace jboss-annotations-api_1.2_spec with jakarta.annotation-api ([#7836](https://github.com/opensearch-project/OpenSearch/pull/7836))
@@ -128,12 +137,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Compress and cache cluster state during validate join request ([#7321](https://github.com/opensearch-project/OpenSearch/pull/7321))
 - [Snapshot Interop] Add Changes in Create Snapshot Flow for remote store interoperability. ([#7118](https://github.com/opensearch-project/OpenSearch/pull/7118))
 - Allow insecure string settings to warn-log usage and advise to migration of a newer secure variant ([#5496](https://github.com/opensearch-project/OpenSearch/pull/5496))
+- Add self-organizing hash table to improve the performance of bucket aggregations ([#7652](https://github.com/opensearch-project/OpenSearch/pull/7652))
 - Check UTF16 string size before converting to String to avoid OOME ([#7963](https://github.com/opensearch-project/OpenSearch/pull/7963))

 ### Deprecated

 ### Removed
 - Remove `COMPRESSOR` variable from `CompressorFactory` and use `DEFLATE_COMPRESSOR` instead ([7907](https://github.com/opensearch-project/OpenSearch/pull/7907))
+- Remove concurrency based minimum file cache size restriction ([#8294](https://github.com/opensearch-project/OpenSearch/pull/8294))

 ### Fixed
 - Fixing error: adding a new/forgotten parameter to the configuration for checking the config on startup in plugins/repository-s3 #7924
diff --git a/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java
new file mode 100644
index 0000000000000..fa75dd2c91f5a
--- /dev/null
+++ b/benchmarks/src/main/java/org/opensearch/common/util/LongHashBenchmark.java
@@ -0,0 +1,425 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+import org.opensearch.common.lease.Releasable;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+@Fork(value = 3)
+@Warmup(iterations = 1, time = 4)
+@Measurement(iterations = 3, time = 2)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+public class LongHashBenchmark {
+
+    @Benchmark
+    public void add(Blackhole bh, HashTableOptions tableOpts, WorkloadOptions workloadOpts) {
+        try (HashTable table = tableOpts.get(); WorkloadIterator iter = workloadOpts.iter()) {
+            while (iter.hasNext()) {
+                bh.consume(table.add(iter.next()));
+            }
+        }
+    }
+
+    /**
+     * Creates a hash table with varying parameters.
+     */
+    @State(Scope.Benchmark)
+    public static class HashTableOptions {
+
+        @Param({ "LongHash", "ReorganizingLongHash" })
+        public String type;
+
+        @Param({ "1" })
+        public long initialCapacity;
+
+        @Param({ "0.6" })
+        public float loadFactor;
+
+        private Supplier<HashTable> supplier;
+
+        @Setup
+        public void setup() {
+            switch (type) {
+                case "LongHash":
+                    supplier = this::newLongHash;
+                    break;
+                case "ReorganizingLongHash":
+                    supplier = this::newReorganizingLongHash;
+                    break;
+                default:
+                    throw new IllegalArgumentException("invalid hash table type: " + type);
+            }
+        }
+
+        public HashTable get() {
+            return supplier.get();
+        }
+
+        private HashTable newLongHash() {
+            return new HashTable() {
+                private final LongHash table = new LongHash(initialCapacity, loadFactor, BigArrays.NON_RECYCLING_INSTANCE);
+
+                @Override
+                public long add(long key) {
+                    return table.add(key);
+                }
+
+                @Override
+                public void close() {
+                    table.close();
+                }
+            };
+        }
+
+        private HashTable newReorganizingLongHash() {
+            return new HashTable() {
+                private final ReorganizingLongHash table = new ReorganizingLongHash(
+                    initialCapacity,
+                    loadFactor,
+                    BigArrays.NON_RECYCLING_INSTANCE
+                );
+
+                @Override
+                public long add(long key) {
+                    return table.add(key);
+                }
+
+                @Override
+                public void close() {
+                    table.close();
+                }
+            };
+        }
+    }
+
+    /**
+     * Creates a workload with varying parameters.
+     */
+    @State(Scope.Benchmark)
+    public static class WorkloadOptions {
+        public static final int NUM_HITS = 20_000_000;
+
+        /**
+         * Repeat the experiment with a growing number of keys.
+         * These values are generated with an exponential growth pattern such that:
+         * value = ceil(previous_value * random_float_between(1.0, 1.14))
+         */
+        @Param({
+            "1", "2", "3", "4", "5", "6", "7", "8", "9", "10",
+            "11", "13", "15", "17", "18", "19", "20", "21", "23", "26",
+            "27", "30", "32", "35", "41", "45", "50", "53", "54", "55",
+            "57", "63", "64", "69", "74", "80", "84", "91", "98", "101",
+            "111", "114", "124", "128", "139", "148", "161", "162", "176", "190",
+            "204", "216", "240", "257", "269", "291", "302", "308", "327", "341",
+            "374", "402", "412", "438", "443", "488", "505", "558", "612", "621",
+            "623", "627", "642", "717", "765", "787", "817", "915", "962", "1011",
+            "1083", "1163", "1237", "1301", "1424", "1541", "1716", "1805", "1817", "1934",
+            "2024", "2238", "2281", "2319", "2527", "2583", "2639", "2662", "2692", "2991",
+            "3201", "3215", "3517", "3681", "3710", "4038", "4060", "4199", "4509", "4855",
+            "5204", "5624", "6217", "6891", "7569", "8169", "8929", "9153", "10005", "10624",
+            "10931", "12070", "12370", "13694", "14227", "15925", "17295", "17376", "18522", "19200",
+            "20108", "21496", "23427", "24224", "26759", "29199", "29897", "32353", "33104", "36523",
+            "38480", "38958", "40020", "44745", "45396", "47916", "49745", "49968", "52231", "53606" })
+        public int size;
+
+        @Param({ "correlated", "uncorrelated", "distinct" })
+        public String dataset;
+
+        private WorkloadIterator iterator;
+
+        @Setup
+        public void setup() {
+            switch (dataset) {
+                case "correlated":
+                    iterator = newCorrelatedWorkload();
+                    break;
+                case "uncorrelated":
+                    iterator = newUncorrelatedWorkload();
+                    break;
+                case "distinct":
+                    iterator = newDistinctWorkload();
+                    break;
+                default:
+                    throw new IllegalArgumentException("invalid dataset: " + dataset);
+            }
+        }
+
+        public WorkloadIterator iter() {
+            return iterator;
+        }
+
+        /**
+         * Simulates monotonically increasing timestamp data with multiple hits mapping to the same key.
+         */
+        private WorkloadIterator newCorrelatedWorkload() {
+            assert NUM_HITS >= size : "ensure hits >= size so that each key is used at least once";
+
+            final long[] data = new long[size];
+            for (int i = 0; i < data.length; i++) {
+                data[i] = 1420070400000L + 3600000L * i;
+            }
+
+            return new WorkloadIterator() {
+                private int count = 0;
+                private int index = 0;
+                private int remaining = NUM_HITS / data.length;
+
+                @Override
+                public boolean hasNext() {
+                    return count < NUM_HITS;
+                }
+
+                @Override
+                public long next() {
+                    if (--remaining <= 0) {
+                        index = (index + 1) % data.length;
+                        remaining = NUM_HITS / data.length;
+                    }
+                    count++;
+                    return data[index];
+                }
+
+                @Override
+                public void reset() {
+                    count = 0;
+                    index = 0;
+                    remaining = NUM_HITS / data.length;
+                }
+            };
+        }
+
+        /**
+         * Simulates uncorrelated data (such as travel distance / fare amount).
+         */
+        private WorkloadIterator newUncorrelatedWorkload() {
+            assert NUM_HITS >= size : "ensure hits >= size so that each key is used at least once";
+
+            final Random random = new Random(0); // fixed seed for reproducible results
+            final long[] data = new long[size];
+            for (int i = 0; i < data.length; i++) {
+                data[i] = Double.doubleToLongBits(20.0 + 80 * random.nextDouble());
+            }
+
+            return new WorkloadIterator() {
+                private int count = 0;
+                private int index = 0;
+
+                @Override
+                public boolean hasNext() {
+                    return count < NUM_HITS;
+                }
+
+                @Override
+                public long next() {
+                    count++;
+                    index = (index + 1) % data.length;
+                    return data[index];
+                }
+
+                @Override
+                public void reset() {
+                    count = 0;
+                    index = 0;
+                }
+            };
+        }
+
+        /**
+         * Simulates a workload with high cardinality, i.e., each hit mapping to a different key.
+         */
+        private WorkloadIterator newDistinctWorkload() {
+            return new WorkloadIterator() {
+                private int count = 0;
+
+                @Override
+                public boolean hasNext() {
+                    return count < size;
+                }
+
+                @Override
+                public long next() {
+                    return count++;
+                }
+
+                @Override
+                public void reset() {
+                    count = 0;
+                }
+            };
+        }
+    }
+
+    private interface HashTable extends Releasable {
+        long add(long key);
+    }
+
+    private interface WorkloadIterator extends Releasable {
+        boolean hasNext();
+
+        long next();
+
+        void reset();
+
+        @Override
+        default void close() {
+            reset();
+        }
+    }
+}
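Note: for readers who want to reproduce the measurements, the sketch below shows one way to invoke this benchmark from a standalone main method using JMH's Runner API. The class name `LongHashBenchmarkRunner` and the narrowed `dataset`/`size` parameter values are illustrative, not part of the patch; in the OpenSearch build the benchmark would normally be run through the benchmarks Gradle project instead.

```java
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;

public class LongHashBenchmarkRunner {
    public static void main(String[] args) throws RunnerException {
        Options options = new OptionsBuilder()
            .include("LongHashBenchmark")     // regex matched against benchmark names
            .param("dataset", "correlated")   // override @Param: run one workload only
            .param("size", "1024")            // hypothetical single key count
            .build();
        new Runner(options).run();           // runs forks, warmups, and measurements
    }
}
```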
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index dd64569259c2d..f9eac9516cb18 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -48,7 +48,7 @@ reactivestreams = 1.0.4
 # when updating this version, you need to ensure compatibility with:
 #  - plugins/ingest-attachment (transitive dependency, check the upstream POM)
 #  - distribution/tools/plugin-cli
-bouncycastle=1.70
+bouncycastle=1.75
 # test dependencies
 randomizedrunner = 2.7.1
 junit = 4.13.2
@@ -66,3 +66,7 @@ zstd = 1.5.5-3
 jzlib = 1.1.3

 resteasy = 6.2.4.Final
+
+# opentelemetry dependencies
+opentelemetry = 1.26.0
+
diff --git a/libs/telemetry/build.gradle b/libs/telemetry/build.gradle
new file mode 100644
index 0000000000000..ce94698836b4f
--- /dev/null
+++ b/libs/telemetry/build.gradle
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+dependencies {
+  testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+  testImplementation "junit:junit:${versions.junit}"
+  testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}"
+  testImplementation(project(":test:framework")) {
+    exclude group: 'org.opensearch', module: 'opensearch-telemetry'
+  }
+}
+
+tasks.named('forbiddenApisMain').configure {
+  replaceSignatureFiles 'jdk-signatures'
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java
new file mode 100644
index 0000000000000..6f50699528b6b
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/Telemetry.java
@@ -0,0 +1,31 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+
+/**
+ * Interface defining telemetry
+ */
+public interface Telemetry {
+
+    /**
+     * Provides tracing telemetry
+     * @return tracing telemetry instance
+     */
+    TracingTelemetry getTracingTelemetry();
+
+    /**
+     * Provides metrics telemetry
+     * @return metrics telemetry instance
+     */
+    MetricsTelemetry getMetricsTelemetry();
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java
new file mode 100644
index 0000000000000..fa3b7fd192f1a
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/MetricsTelemetry.java
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.metrics;
+
+/**
+ * Interface for metrics telemetry providers
+ */
+public interface MetricsTelemetry {
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java
new file mode 100644
index 0000000000000..dfe17cc1c11ed
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/metrics/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains metrics related classes
+ */
+package org.opensearch.telemetry.metrics;
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java
new file mode 100644
index 0000000000000..ad76f5e308bea
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains telemetry related classes
+ */
+package org.opensearch.telemetry;
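Note: to make the `Telemetry` contract concrete, here is a minimal hypothetical provider that wires the two sub-interfaces together. The class name `SimpleTelemetry` and the anonymous no-op metrics object are illustrative only (`MetricsTelemetry` declares no methods in this patch); this is not the plugin implementation added later in the diff.

```java
import org.opensearch.telemetry.Telemetry;
import org.opensearch.telemetry.metrics.MetricsTelemetry;
import org.opensearch.telemetry.tracing.TracingTelemetry;

// Illustrative provider: bundles a tracing implementation with a placeholder
// metrics implementation.
public class SimpleTelemetry implements Telemetry {
    private final TracingTelemetry tracingTelemetry;
    private final MetricsTelemetry metricsTelemetry = new MetricsTelemetry() {
    };

    public SimpleTelemetry(TracingTelemetry tracingTelemetry) {
        this.tracingTelemetry = tracingTelemetry;
    }

    @Override
    public TracingTelemetry getTracingTelemetry() {
        return tracingTelemetry;
    }

    @Override
    public MetricsTelemetry getMetricsTelemetry() {
        return metricsTelemetry;
    }
}
```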
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java
new file mode 100644
index 0000000000000..316edc971913e
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/AbstractSpan.java
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * Base span
+ */
+public abstract class AbstractSpan implements Span {
+
+    /**
+     * name of the span
+     */
+    private final String spanName;
+    /**
+     * span's parent span
+     */
+    private final Span parentSpan;
+
+    /**
+     * Base constructor
+     * @param spanName name of the span
+     * @param parentSpan span's parent span
+     */
+    protected AbstractSpan(String spanName, Span parentSpan) {
+        this.spanName = spanName;
+        this.parentSpan = parentSpan;
+    }
+
+    @Override
+    public Span getParentSpan() {
+        return parentSpan;
+    }
+
+    @Override
+    public String getSpanName() {
+        return spanName;
+    }
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
new file mode 100644
index 0000000000000..ab9110af7c3ab
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+ * The default tracer implementation. This class implements the basic logic for span lifecycle and its state management.
+ * It also handles tracing context propagation between spans.
+ */
+public class DefaultTracer implements Tracer {
+    static final String THREAD_NAME = "th_name";
+
+    private final TracingTelemetry tracingTelemetry;
+    private final TracerContextStorage<String, Span> tracerContextStorage;
+
+    /**
+     * Creates DefaultTracer instance
+     *
+     * @param tracingTelemetry tracing telemetry instance
+     * @param tracerContextStorage storage used for storing current span context
+     */
+    public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage<String, Span> tracerContextStorage) {
+        this.tracingTelemetry = tracingTelemetry;
+        this.tracerContextStorage = tracerContextStorage;
+    }
+
+    @Override
+    public Scope startSpan(String spanName) {
+        Span span = createSpan(spanName, getCurrentSpan());
+        setCurrentSpanInContext(span);
+        addDefaultAttributes(span);
+        return new ScopeImpl(() -> endSpan(span));
+    }
+
+    @Override
+    public void addSpanAttribute(String key, String value) {
+        Span currentSpan = getCurrentSpan();
+        currentSpan.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, long value) {
+        Span currentSpan = getCurrentSpan();
+        currentSpan.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, double value) {
+        Span currentSpan = getCurrentSpan();
+        currentSpan.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, boolean value) {
+        Span currentSpan = getCurrentSpan();
+        currentSpan.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanEvent(String event) {
+        Span currentSpan = getCurrentSpan();
+        currentSpan.addEvent(event);
+    }
+
+    @Override
+    public void close() throws IOException {
+        ((Closeable) tracingTelemetry).close();
+    }
+
+    // Visible for testing
+    Span getCurrentSpan() {
+        return tracerContextStorage.get(TracerContextStorage.CURRENT_SPAN);
+    }
+
+    private void endSpan(Span span) {
+        if (span != null) {
+            span.endSpan();
+            setCurrentSpanInContext(span.getParentSpan());
+        }
+    }
+
+    private Span createSpan(String spanName, Span parentSpan) {
+        return tracingTelemetry.createSpan(spanName, parentSpan);
+    }
+
+    private void setCurrentSpanInContext(Span span) {
+        tracerContextStorage.put(TracerContextStorage.CURRENT_SPAN, span);
+    }
+
+    /**
+     * Adds default attributes in the span
+     * @param span the current active span
+     */
+    protected void addDefaultAttributes(Span span) {
+        span.addAttribute(THREAD_NAME, Thread.currentThread().getName());
+    }
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java
new file mode 100644
index 0000000000000..52f4eaf648eea
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * An auto-closeable that represents the scope of a span.
+ * It is recommended that you use this class with a try-with-resources block.
+ */
+public interface Scope extends AutoCloseable {
+    /**
+     * No-op Scope implementation
+     */
+    Scope NO_OP = () -> {};
+
+    /**
+     * closes the scope
+     */
+    @Override
+    void close();
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java
new file mode 100644
index 0000000000000..30a7ac7fa90e7
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * Executes the runnable on close
+ */
+public class ScopeImpl implements Scope {
+
+    private final Runnable runnableOnClose;
+
+    /**
+     * Creates Scope instance
+     * @param runnableOnClose runnable to execute on scope close
+     */
+    public ScopeImpl(Runnable runnableOnClose) {
+        this.runnableOnClose = runnableOnClose;
+    }
+
+    /**
+     * Executes the runnable to end the scope
+     */
+    @Override
+    public void close() {
+        runnableOnClose.run();
+    }
+}
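Note: a short usage sketch of the lifecycle implemented above, assuming a `tracer` already built from some `TracingTelemetry` and context storage. The span name and attribute values are hypothetical; the point is that closing the `Scope` ends the span and restores its parent as the current span.

```java
// startSpan makes the new span current; attribute/event calls target it.
try (Scope ignored = tracer.startSpan("fetch_shard_data")) {
    tracer.addSpanAttribute("shard_id", 42L);
    tracer.addSpanEvent("cache_miss");
    // ... traced work ...
} // scope closed: span ended, parent span becomes current again
```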
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java
new file mode 100644
index 0000000000000..0710b8a22a37f
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * An interface that represents a tracing span.
+ * Spans are created by the Tracer.startSpan method.
+ * A span must be ended by closing the Scope returned by Tracer.startSpan, which internally calls the span's endSpan.
+ */
+public interface Span {
+
+    /**
+     * Ends the span
+     */
+    void endSpan();
+
+    /**
+     * Returns span's parent span
+     */
+    Span getParentSpan();
+
+    /**
+     * Returns the name of the {@link Span}
+     */
+    String getSpanName();
+
+    /**
+     * Adds string type attribute in the span
+     *
+     * @param key key of the attribute
+     * @param value value of the attribute
+     */
+    void addAttribute(String key, String value);
+
+    /**
+     * Adds long type attribute in the span
+     *
+     * @param key key of the attribute
+     * @param value value of the attribute
+     */
+    void addAttribute(String key, Long value);
+
+    /**
+     * Adds double type attribute in the span
+     *
+     * @param key key of the attribute
+     * @param value value of the attribute
+     */
+    void addAttribute(String key, Double value);
+
+    /**
+     * Adds boolean type attribute in the span
+     *
+     * @param key key of the attribute
+     * @param value value of the attribute
+     */
+    void addAttribute(String key, Boolean value);
+
+    /**
+     * Adds an event in the span
+     *
+     * @param event name of the event
+     */
+    void addEvent(String event);
+
+    /**
+     * Returns traceId of the span
+     * @return span's traceId
+     */
+    String getTraceId();
+
+    /**
+     * Returns spanId of the span
+     * @return span's spanId
+     */
+    String getSpanId();
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java
new file mode 100644
index 0000000000000..99d1bd3c93c84
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanReference.java
@@ -0,0 +1,41 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * Wrapper class to hold a reference to a Span
+ */
+public class SpanReference {
+
+    private Span span;
+
+    /**
+     * Creates the wrapper with the given span
+     * @param span the span object to wrap
+     */
+    public SpanReference(Span span) {
+        this.span = span;
+    }
+
+    /**
+     * Returns the span object
+     * @return underlying span
+     */
+    public Span getSpan() {
+        return span;
+    }
+
+    /**
+     * Updates the underlying span
+     * @param span underlying span
+     */
+    public void setSpan(Span span) {
+        this.span = span;
+    }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java
new file mode 100644
index 0000000000000..fcc091eb39c48
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java
@@ -0,0 +1,67 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import java.io.Closeable;
+
+/**
+ * Tracer is the interface used to create a {@link Span} and interact with the current active {@link Span}.
+ * It automatically handles the context propagation between threads, tasks, nodes etc.
+ *
+ * All methods on the Tracer object are multi-thread safe.
+ */
+public interface Tracer extends Closeable {
+
+    /**
+     * Starts the {@link Span} with the given name
+     *
+     * @param spanName span name
+     * @return scope of the span, must be closed with explicit close or with try-with-resources
+     */
+    Scope startSpan(String spanName);
+
+    /**
+     * Adds a string attribute to the current active {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, String value);
+
+    /**
+     * Adds a long attribute to the current active {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, long value);
+
+    /**
+     * Adds a double attribute to the current active {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, double value);
+
+    /**
+     * Adds a boolean attribute to the current active {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, boolean value);
+
+    /**
+     * Adds an event to the current active {@link Span}.
+     *
+     * @param event event name
+     */
+    void addSpanEvent(String event);
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java
new file mode 100644
index 0000000000000..eb93006835332
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracerContextStorage.java
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+/**
+ * Storage interface used for storing tracing context
+ * @param <K> key type
+ * @param <V> value type
+ */
+public interface TracerContextStorage<K, V> {
+    /**
+     * Key for storing current span
+     */
+    String CURRENT_SPAN = "current_span";
+
+    /**
+     * Fetches the value corresponding to key
+     * @param key of the tracing context
+     * @return value for key
+     */
+    V get(K key);
+
+    /**
+     * Puts a tracing context value with key
+     * @param key of the tracing context
+     * @param value of the tracing context
+     */
+    void put(K key, V value);
+}
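Note: a minimal sketch of one possible `TracerContextStorage` implementation backed by a `ThreadLocal`. This is illustrative only; the storage implementation OpenSearch actually wires in is not shown in this diff, and a production version would need to propagate the span across thread handoffs rather than per-thread state alone.

```java
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.TracerContextStorage;

// Illustrative thread-local storage keyed by TracerContextStorage.CURRENT_SPAN.
public class ThreadLocalSpanStorage implements TracerContextStorage<String, Span> {
    private final ThreadLocal<Span> currentSpan = new ThreadLocal<>();

    @Override
    public Span get(String key) {
        return CURRENT_SPAN.equals(key) ? currentSpan.get() : null;
    }

    @Override
    public void put(String key, Span value) {
        if (CURRENT_SPAN.equals(key)) {
            currentSpan.set(value); // null restores "no current span"
        }
    }
}
```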
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java
new file mode 100644
index 0000000000000..1152e3aedfa88
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingContextPropagator.java
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import java.util.Map;
+import java.util.function.BiConsumer;
+
+/**
+ * Interface defining the tracing related context propagation
+ */
+public interface TracingContextPropagator {
+
+    /**
+     * Extracts the current span from context
+     * @param props properties
+     * @return current span
+     */
+    Span extract(Map<String, String> props);
+
+    /**
+     * Injects the tracing context
+     *
+     * @param currentSpan the current active span
+     * @param setter to add tracing context in map
+     */
+    void inject(Span currentSpan, BiConsumer<String, String> setter);
+
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java
new file mode 100644
index 0000000000000..16c76bd0cc141
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/TracingTelemetry.java
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import java.io.Closeable;
+
+/**
+ * Interface for tracing telemetry providers
+ */
+public interface TracingTelemetry extends Closeable {
+
+    /**
+     * Creates a span with the provided arguments
+     * @param spanName name of the span
+     * @param parentSpan span's parent span
+     * @return span instance
+     */
+    Span createSpan(String spanName, Span parentSpan);
+
+    /**
+     * Provides the tracing context propagator
+     * @return tracing context propagator instance
+     */
+    TracingContextPropagator getContextPropagator();
+
+    /**
+     * closes the resource
+     */
+    void close();
+
+}
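Note: a sketch of how the propagator's two methods pair up across a request boundary. The `roundTrip` helper, the header map, and the class name are assumptions for illustration; in practice the inject side runs on the sender and the extract side on the receiver.

```java
import java.util.HashMap;
import java.util.Map;
import org.opensearch.telemetry.tracing.Span;
import org.opensearch.telemetry.tracing.TracingContextPropagator;

// Illustrative round trip: inject writes key/value pairs through the
// BiConsumer; extract rebuilds the remote parent span from the same map.
public class PropagationSketch {
    static Span roundTrip(TracingContextPropagator propagator, Span currentSpan) {
        Map<String, String> headers = new HashMap<>();
        propagator.inject(currentSpan, headers::put); // sender side
        return propagator.extract(headers);           // receiver side
    }
}
```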
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java
new file mode 100644
index 0000000000000..18fc60e41e54d
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java
@@ -0,0 +1,76 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing.noop;
+
+import org.opensearch.telemetry.tracing.Scope;
+import org.opensearch.telemetry.tracing.Tracer;
+
+/**
+ * No-op implementation of Tracer
+ */
+public class NoopTracer implements Tracer {
+
+    /**
+     * No-op Tracer instance
+     */
+    public static final Tracer INSTANCE = new NoopTracer();
+
+    private NoopTracer() {}
+
+    @Override
+    public Scope startSpan(String spanName) {
+        return Scope.NO_OP;
+    }
+
+    /**
+     * @param key attribute key
+     * @param value attribute value
+     */
+    @Override
+    public void addSpanAttribute(String key, String value) {
+
+    }
+
+    /**
+     * @param key attribute key
+     * @param value attribute value
+     */
+    @Override
+    public void addSpanAttribute(String key, long value) {
+
+    }
+
+    /**
+     * @param key attribute key
+     * @param value attribute value
+     */
+    @Override
+    public void addSpanAttribute(String key, double value) {
+
+    }
+
+    /**
+     * @param key attribute key
+     * @param value attribute value
+     */
+    @Override
+    public void addSpanAttribute(String key, boolean value) {
+
+    }
+
+    @Override
+    public void addSpanEvent(String event) {
+
+    }
+
+    @Override
+    public void close() {
+
+    }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java
new file mode 100644
index 0000000000000..b9d83e7bc7275
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains No-op implementations
+ */
+package org.opensearch.telemetry.tracing.noop;
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java
new file mode 100644
index 0000000000000..66898bd58b753
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * Contains tracing related classes
+ */
+package org.opensearch.telemetry.tracing;
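Note: a small sketch of the intended role of the no-op instance: callers can hold a `Tracer` unconditionally and swap in `NoopTracer.INSTANCE` when tracing is off, so traced code paths stay unchanged. The factory class and the `tracingEnabled` flag are hypothetical.

```java
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.noop.NoopTracer;

// Illustrative factory: call sites keep their tracing code and pay almost
// nothing when tracing is disabled.
public class TracerFactory {
    static Tracer create(boolean tracingEnabled, Tracer realTracer) {
        return tracingEnabled ? realTracer : NoopTracer.INSTANCE;
    }
}
```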
diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java
new file mode 100644
index 0000000000000..f0e8f3c2e2344
--- /dev/null
+++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java
@@ -0,0 +1,122 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.junit.Assert;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.verify;
+
+public class DefaultTracerTests extends OpenSearchTestCase {
+
+    private TracingTelemetry mockTracingTelemetry;
+    private TracerContextStorage<String, Span> mockTracerContextStorage;
+    private Span mockSpan;
+    private Span mockParentSpan;
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        setupMocks();
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        super.tearDown();
+    }
+
+    public void testCreateSpan() {
+        DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+
+        defaultTracer.startSpan("span_name");
+
+        Assert.assertEquals("span_name", defaultTracer.getCurrentSpan().getSpanName());
+    }
+
+    public void testEndSpanByClosingScope() {
+        DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        try (Scope scope = defaultTracer.startSpan("span_name")) {
+            verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockSpan);
+        }
+        verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockParentSpan);
+    }
+
+    public void testAddSpanAttributeString() {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        defaultTracer.startSpan("span_name");
+
+        defaultTracer.addSpanAttribute("key", "value");
+
+        verify(mockSpan).addAttribute("key", "value");
+    }
+
+    public void testAddSpanAttributeLong() {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        defaultTracer.startSpan("span_name");
+
+        defaultTracer.addSpanAttribute("key", 1L);
+
+        verify(mockSpan).addAttribute("key", 1L);
+    }
+
+    public void testAddSpanAttributeDouble() {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        defaultTracer.startSpan("span_name");
+
+        defaultTracer.addSpanAttribute("key", 1.0);
+
+        verify(mockSpan).addAttribute("key", 1.0);
+    }
+
+    public void testAddSpanAttributeBoolean() {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        defaultTracer.startSpan("span_name");
+
+        defaultTracer.addSpanAttribute("key", true);
+
+        verify(mockSpan).addAttribute("key", true);
+    }
+
+    public void testAddEvent() {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+        defaultTracer.startSpan("span_name");
+
+        defaultTracer.addSpanEvent("eventName");
+
+        verify(mockSpan).addEvent("eventName");
+    }
+
+    public void testClose() throws IOException {
+        Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage);
+
+        defaultTracer.close();
+
+        verify(mockTracingTelemetry).close();
+    }
+
+    @SuppressWarnings("unchecked")
+    private void setupMocks() {
+        mockTracingTelemetry = mock(TracingTelemetry.class);
+        mockSpan = mock(Span.class);
+        mockParentSpan = mock(Span.class);
+        mockTracerContextStorage = mock(TracerContextStorage.class);
+        when(mockSpan.getSpanName()).thenReturn("span_name");
+        when(mockSpan.getSpanId()).thenReturn("span_id");
+        when(mockSpan.getTraceId()).thenReturn("trace_id");
+        when(mockSpan.getParentSpan()).thenReturn(mockParentSpan);
+        when(mockParentSpan.getSpanId()).thenReturn("parent_span_id");
+        when(mockParentSpan.getTraceId()).thenReturn("trace_id");
+        when(mockTracerContextStorage.get(TracerContextStorage.CURRENT_SPAN)).thenReturn(mockParentSpan, mockSpan);
+        when(mockTracingTelemetry.createSpan("span_name", mockParentSpan)).thenReturn(mockSpan);
+    }
+}
diff --git a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
index 784dad8cea49f..1f0680b27796d 100644
--- a/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
+++ b/modules/ingest-common/src/internalClusterTest/java/org/opensearch/ingest/common/IngestRestartIT.java
@@ -132,7 +132,7 @@ public void testFailureInConditionalProcessor() {
         for (int k = 0; k < nodeCount; k++) {
             List<IngestStats.ProcessorStat> stats = r.getNodes().get(k).getIngestStats().getProcessorStats().get(pipelineId);
             for (IngestStats.ProcessorStat st : stats) {
-                assertThat(st.getStats().getIngestCurrent(), greaterThanOrEqualTo(0L));
+                assertThat(st.getStats().getCurrent(), greaterThanOrEqualTo(0L));
             }
         }
     }
diff --git a/plugins/identity-shiro/build.gradle b/plugins/identity-shiro/build.gradle
index 22dc21864b620..baa3464d0a98e 100644
--- a/plugins/identity-shiro/build.gradle
+++ b/plugins/identity-shiro/build.gradle
@@ -28,7 +28,7 @@ dependencies {

   implementation 'org.passay:passay:1.6.3'

-  implementation "org.bouncycastle:bcprov-jdk15on:${versions.bouncycastle}"
+  implementation "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}"

   testImplementation project(path: ':modules:transport-netty4') // for http
   testImplementation project(path: ':plugins:transport-nio') // for http
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1
deleted file mode 100644
index f5e89c0f5ed45..0000000000000
--- a/plugins/identity-shiro/licenses/bcprov-jdk15on-1.70.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-4636a0d01f74acaf28082fb62b317f1080118371
\ No newline at end of file
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1 b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
new file mode 100644
index 0000000000000..9911bb75f9209
--- /dev/null
+++ b/plugins/identity-shiro/licenses/bcprov-jdk15to18-1.75.jar.sha1
@@ -0,0 +1 @@
+df22e1b6a9f6b218913f5b68dd16641344397fe0
\ No newline at end of file
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-LICENSE.txt b/plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt
similarity index 100%
rename from plugins/identity-shiro/licenses/bcprov-jdk15on-LICENSE.txt
rename to plugins/identity-shiro/licenses/bcprov-jdk15to18-LICENSE.txt
diff --git a/plugins/identity-shiro/licenses/bcprov-jdk15on-NOTICE.txt b/plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt
similarity index 100%
rename from plugins/identity-shiro/licenses/bcprov-jdk15on-NOTICE.txt
rename to plugins/identity-shiro/licenses/bcprov-jdk15to18-NOTICE.txt
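Note on the `bcprov-jdk15on` to `bcprov-jdk15to18` swap above and in the files that follow: only the Maven artifact changes; BouncyCastle keeps the same package and class names across the two jars, so existing call sites such as the illustrative one below compile against either. The class name `BcProviderCheck` is hypothetical.

```java
import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

// Illustrative provider registration: identical source across the rename.
public class BcProviderCheck {
    public static void main(String[] args) {
        Security.addProvider(new BouncyCastleProvider());
        System.out.println(Security.getProvider("BC")); // prints the BC provider
    }
}
```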
"org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" + api "org.bouncycastle:bcmail-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcprov-jdk15to18:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" // OpenOffice api "org.apache.poi:poi-ooxml:${versions.poi}" api "org.apache.poi:poi:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 deleted file mode 100644 index 672e479eda8d7..0000000000000 --- a/plugins/ingest-attachment/licenses/bcmail-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08f4aafad90f6cc7f16b9992279828ae848c9e0d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..e6840a9b02b38 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +b316bcd094e3917b1ece93a6edbab93f8315fb3b \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcmail-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcmail-jdk15on-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcmail-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 deleted file mode 100644 index e348463a21257..0000000000000 --- a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f81e5af49571a9d5a109a88f239a73ce87055417 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..9181b1c3ab1b6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +f16e5252ad7a46d5eaf255231b0a5da307599082 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcpkix-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcpkix-jdk15on-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcpkix-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 b/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 deleted file mode 100644 index f5e89c0f5ed45..0000000000000 --- a/plugins/ingest-attachment/licenses/bcprov-jdk15on-1.70.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4636a0d01f74acaf28082fb62b317f1080118371 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 
b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 new file mode 100644 index 0000000000000..9911bb75f9209 --- /dev/null +++ b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-1.75.jar.sha1 @@ -0,0 +1 @@ +df22e1b6a9f6b218913f5b68dd16641344397fe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-LICENSE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15on-LICENSE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk15to18-LICENSE.txt diff --git a/plugins/ingest-attachment/licenses/bcprov-jdk15on-NOTICE.txt b/plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt similarity index 100% rename from plugins/ingest-attachment/licenses/bcprov-jdk15on-NOTICE.txt rename to plugins/ingest-attachment/licenses/bcprov-jdk15to18-NOTICE.txt diff --git a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy index 0fa85f6f040f6..4b90f9a21aae4 100644 --- a/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/ingest-attachment/src/main/plugin-metadata/plugin-security.policy @@ -35,9 +35,6 @@ grant { // needed to apply additional sandboxing to tika parsing permission java.security.SecurityPermission "createAccessControlContext"; - // TODO: fix PDFBox not to actually install bouncy castle like this - permission java.security.SecurityPermission "putProviderProperty.BC"; - permission java.security.SecurityPermission "insertProvider"; // TODO: fix POI XWPF to not do this: https://bz.apache.org/bugzilla/show_bug.cgi?id=58597 permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; // needed by xmlbeans, as part of POI for MS xml docs diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 5d7a4c64ae34a..41c36dffea296 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -52,33 +52,43 @@ versions << [ ] dependencies { - api 'com.google.cloud:google-cloud-storage:1.113.1' + api 'com.google.api:api-common:1.8.1' + api 'com.google.api:gax:2.27.0' + api 'com.google.api:gax-httpjson:0.103.1' + + api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0' + + api 'com.google.api-client:google-api-client:2.2.0' + + api 'com.google.api.grpc:proto-google-common-protos:2.10.0' + api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' + + api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" + api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" + api 'com.google.cloud:google-cloud-core:2.5.10' + api 'com.google.cloud:google-cloud-core-http:2.17.0' + api 'com.google.cloud:google-cloud-storage:1.113.1' + + api 'com.google.code.gson:gson:2.9.0' + runtimeOnly "com.google.guava:guava:${versions.guava}" api 'com.google.guava:failureaccess:1.0.1' + api 'com.google.http-client:google-http-client:1.43.2' + api 'com.google.http-client:google-http-client-appengine:1.43.2' + api 'com.google.http-client:google-http-client-gson:1.43.2' + api 'com.google.http-client:google-http-client-jackson2:1.43.2' + + api 'com.google.oauth-client:google-oauth-client:1.34.1' + api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'com.google.api:api-common:1.8.1' - api 
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index 5d7a4c64ae34a..41c36dffea296 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -52,33 +52,43 @@ versions << [
 ]

 dependencies {
-  api 'com.google.cloud:google-cloud-storage:1.113.1'
+  api 'com.google.api:api-common:1.8.1'
+  api 'com.google.api:gax:2.27.0'
+  api 'com.google.api:gax-httpjson:0.103.1'
+
+  api 'com.google.apis:google-api-services-storage:v1-rev20230617-2.0.0'
+
+  api 'com.google.api-client:google-api-client:2.2.0'
+
+  api 'com.google.api.grpc:proto-google-common-protos:2.10.0'
+  api 'com.google.api.grpc:proto-google-iam-v1:0.12.0'
+
+  api "com.google.auth:google-auth-library-credentials:${versions.google_auth}"
+  api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}"
+  api 'com.google.cloud:google-cloud-core:2.5.10'
+  api 'com.google.cloud:google-cloud-core-http:2.17.0'
+  api 'com.google.cloud:google-cloud-storage:1.113.1'
+
+  api 'com.google.code.gson:gson:2.9.0'
+
   runtimeOnly "com.google.guava:guava:${versions.guava}"
   api 'com.google.guava:failureaccess:1.0.1'
+  api 'com.google.http-client:google-http-client:1.43.2'
+  api 'com.google.http-client:google-http-client-appengine:1.43.2'
+  api 'com.google.http-client:google-http-client-gson:1.43.2'
+  api 'com.google.http-client:google-http-client-jackson2:1.43.2'
+
+  api 'com.google.oauth-client:google-oauth-client:1.34.1'
+
   api "commons-logging:commons-logging:${versions.commonslogging}"
   api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}"
   api "commons-codec:commons-codec:${versions.commonscodec}"
-  api 'com.google.api:api-common:1.8.1'
-  api 'com.google.api:gax:2.27.0'
   api 'org.threeten:threetenbp:1.4.4'
-  api 'com.google.code.gson:gson:2.9.0'
-  api 'com.google.api.grpc:proto-google-common-protos:2.10.0'
-  api 'com.google.api.grpc:proto-google-iam-v1:0.12.0'
-  api 'com.google.cloud:google-cloud-core-http:2.17.0'
-  api "com.google.auth:google-auth-library-credentials:${versions.google_auth}"
-  api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}"
-  api 'com.google.oauth-client:google-oauth-client:1.33.3'
-  api 'com.google.api-client:google-api-client:1.34.0'
-  api 'com.google.http-client:google-http-client-appengine:1.43.2'
-  api 'com.google.http-client:google-http-client-jackson2:1.42.3'
-  api 'com.google.http-client:google-http-client-gson:1.41.4'
-  api 'com.google.api:gax-httpjson:0.103.1'
   api 'io.grpc:grpc-context:1.46.0'
   api 'io.opencensus:opencensus-api:0.31.1'
   api 'io.opencensus:opencensus-contrib-http-util:0.31.1'
-  api 'com.google.apis:google-api-services-storage:v1-rev20220608-1.32.1'

   testImplementation project(':test:fixtures:gcs-fixture')
 }
@@ -167,8 +177,6 @@ thirdPartyAudit {
     'org.apache.http.client.methods.HttpRequestBase',
     'org.apache.http.config.Registry',
     'org.apache.http.config.RegistryBuilder',
-    'org.apache.http.config.SocketConfig',
-    'org.apache.http.config.SocketConfig$Builder',
     'org.apache.http.conn.ClientConnectionManager',
     'org.apache.http.conn.ConnectionKeepAliveStrategy',
     'org.apache.http.conn.params.ConnManagerParams',
diff --git a/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1
deleted file mode 100644
index 9be9480435085..0000000000000
--- a/plugins/repository-gcs/licenses/google-api-client-1.34.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-af2586412cabeee49c9db6d736e75b745bc467f8
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1
new file mode 100644
index 0000000000000..f9604d6837ca9
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-api-client-2.2.0.jar.sha1
@@ -0,0 +1 @@
+10e53fd4d987e37190432e896bdaa62e8ea2c628
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1
deleted file mode 100644
index 07aaadb2664b2..0000000000000
--- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-74724addc6cecac408dad3a6a26423b7647b3724
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1
new file mode 100644
index 0000000000000..1a1452f773b96
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20230617-2.0.0.jar.sha1
@@ -0,0 +1 @@
+fc3f225b405303fe7cb760d578348b6b07e7ea8b
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1
deleted file mode 100644
index 17960a99abea2..0000000000000
--- a/plugins/repository-gcs/licenses/google-http-client-gson-1.41.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-fa665c1c573765dd858bc34931ad747e4ed11efe
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1
new file mode 100644
index 0000000000000..df0374aa27c70
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-http-client-gson-1.43.2.jar.sha1
@@ -0,0 +1 @@
+b1c2e3e89804c113dba7b342aa8e0fc2cf3d9378
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1
deleted file mode 100644
index 34d7d49f7b147..0000000000000
--- a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.42.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-789cafde696403b429026bf19071caf46d8c8934
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1
new file mode 100644
index 0000000000000..7b606a07651ed
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-http-client-jackson2-1.43.2.jar.sha1
@@ -0,0 +1 @@
+5e52a9967ebd8246fc4cca64df5f03608db5ac6e
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1
deleted file mode 100644
index f2afaa1bc2dba..0000000000000
--- a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9d445a8649b0de731922b9a3ebf1552b5403611d
\ No newline at end of file
diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1
new file mode 100644
index 0000000000000..a8434bd380761
--- /dev/null
+++ b/plugins/repository-gcs/licenses/google-oauth-client-1.34.1.jar.sha1
@@ -0,0 +1 @@
+4a4f88c5e13143f882268c98239fb85c3b2c6cb2
\ No newline at end of file
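The repository-gcs hunk above replaces an unordered tail of Google helpers with an explicitly grouped list: google-api-client moves from 1.34.0 to 2.2.0, google-api-services-storage to the rev20230617-2.0.0 revision, google-oauth-client from 1.33.3 to 1.34.1, and the google-http-client modules converge on 1.43.2 (previously a mix of 1.41.4, 1.42.3, and 1.43.2). With this many hand-pinned coordinates, one way to keep the family from drifting apart again is strict conflict resolution; a sketch using stock Gradle, not part of this change:

    // Illustrative guard: fail dependency resolution when two declared
    // artifacts pull different versions of the same module.
    configurations.all {
      resolutionStrategy {
        failOnVersionConflict()
      }
    }

The checksum-file churn that follows is the mechanical consequence: one deleted and one added *.jar.sha1 per upgraded jar.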
diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle
new file mode 100644
index 0000000000000..7a56621be5f1e
--- /dev/null
+++ b/plugins/telemetry-otel/build.gradle
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+import org.apache.tools.ant.taskdefs.condition.Os
+import org.opensearch.gradle.Architecture
+import org.opensearch.gradle.OS
+import org.opensearch.gradle.info.BuildParams
+
+opensearchplugin {
+  description 'Opentelemetry based telemetry implementation.'
+  classname 'org.opensearch.telemetry.OTelTelemetryPlugin'
+  hasClientJar = true
+}
+
+dependencies {
+  api project(":libs:opensearch-telemetry")
+  api "io.opentelemetry:opentelemetry-api:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-context:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-sdk:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-sdk-common:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-sdk-trace:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-sdk-metrics:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-exporter-logging:${versions.opentelemetry}"
+  api "io.opentelemetry:opentelemetry-semconv:${versions.opentelemetry}-alpha"
+  api "io.opentelemetry:opentelemetry-sdk-logs:${versions.opentelemetry}-alpha"
+  api "io.opentelemetry:opentelemetry-api-logs:${versions.opentelemetry}-alpha"
+}
+
+
+thirdPartyAudit {
+  ignoreViolations(
+    'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueConsumerIndexField',
+    'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerIndexField',
+    'io.opentelemetry.internal.shaded.jctools.queues.MpscArrayQueueProducerLimitField',
+    'io.opentelemetry.internal.shaded.jctools.util.UnsafeAccess',
+    'io.opentelemetry.internal.shaded.jctools.util.UnsafeRefArrayAccess'
+  )
+
+  ignoreMissingClasses(
+    'io.opentelemetry.api.events.EventEmitter',
+    'io.opentelemetry.api.events.EventEmitterBuilder',
+    'io.opentelemetry.api.events.EventEmitterProvider',
+    'io.opentelemetry.extension.incubator.metrics.ExtendedDoubleHistogramBuilder',
+    'io.opentelemetry.extension.incubator.metrics.ExtendedLongHistogramBuilder',
+    'io.opentelemetry.extension.incubator.metrics.HistogramAdviceConfigurer',
+    'io.opentelemetry.sdk.autoconfigure.spi.ConfigProperties',
+    'io.opentelemetry.sdk.autoconfigure.spi.logs.ConfigurableLogRecordExporterProvider',
+    'io.opentelemetry.sdk.autoconfigure.spi.metrics.ConfigurableMetricExporterProvider',
+    'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider'
+  )
+}
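The new telemetry-otel plugin pins the whole OpenTelemetry stack to a single `versions.opentelemetry` (1.26.0 per the checksum files that follow, with the -alpha qualifier on the not-yet-stable logs and semconv artifacts). The thirdPartyAudit block acknowledges classes the jars reference but do not ship (the autoconfigure SPI and incubator metrics APIs) plus the shaded JCtools queues that touch Unsafe. As a rough illustration of the API surface being bundled, using only published io.opentelemetry 1.26.0 classes; OpenTelemetry.noop() stands in here for whatever SDK the plugin actually wires up:

    // Illustrative smoke test of the bundled tracing API.
    import io.opentelemetry.api.OpenTelemetry
    import io.opentelemetry.api.trace.Span
    import io.opentelemetry.api.trace.Tracer

    OpenTelemetry otel = OpenTelemetry.noop()
    Tracer tracer = otel.getTracer('telemetry-demo')
    Span span = tracer.spanBuilder('sample-operation').startSpan()
    try {
      // traced work would run here
    } finally {
      span.end()
    }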
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1
new file mode 100644
index 0000000000000..da3abcc8f70d2
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.26.0.jar.sha1
@@ -0,0 +1 @@
+7ee1ccca95155e4640094ba8dfbd0bb8c1709c83
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-LICENSE.txt
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
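Each bundled jar above ships with the same trio of metadata files: a *.jar.sha1 checksum, a LICENSE.txt, and a NOTICE.txt (the NOTICE files in this plugin are empty; index e69de29bb2d1d is git's empty-blob hash). When several jars share one upstream license, the license check can instead be pointed at a single shared pair; a sketch assuming the `dependencyLicenses` task and its `mapping` option from the OpenSearch build tooling (this change keeps per-artifact copies, so the mapping below is purely illustrative):

    // Illustrative alternative: map all opentelemetry-* jars onto one
    // shared opentelemetry-LICENSE.txt / opentelemetry-NOTICE.txt pair.
    dependencyLicenses {
      mapping from: /opentelemetry-.*/, to: 'opentelemetry'
    }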
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1
new file mode 100644
index 0000000000000..2c233d785dcb2
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-1.26.0-alpha.jar.sha1
@@ -0,0 +1 @@
+1b0b6c1a20da0f841634d4f736e331aa4871a4db
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-LICENSE.txt
@@ -0,0 +1,202 @@
+[202 lines: standard Apache License 2.0 text, identical to opentelemetry-api-LICENSE.txt above]
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-api-logs-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1
new file mode 100644
index 0000000000000..01d9fd732249b
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.26.0.jar.sha1
@@ -0,0 +1 @@
+42991f523a7a10761213e2f11633c67c8beaed88
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-context-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-context-LICENSE.txt
@@ -0,0 +1,202 @@
+[202 lines: standard Apache License 2.0 text, identical to opentelemetry-api-LICENSE.txt above]
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-context-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1
new file mode 100644
index 0000000000000..ef07e4cb81e34
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.26.0.jar.sha1
@@ -0,0 +1 @@
+1b932170774da5e766440fa058d879f68fe2c5dd
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-LICENSE.txt
@@ -0,0 +1,202 @@
+[202 lines: standard Apache License 2.0 text, identical to opentelemetry-api-LICENSE.txt above]
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-NOTICE.txt
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1
new file mode 100644
index 0000000000000..dc9946de3b160
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.26.0.jar.sha1
@@ -0,0 +1 @@
+79a86f258ede8625627e8fbdff07d1149c88a8e6
\ No newline at end of file
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt
new file mode 100644
index 0000000000000..d645695673349
--- /dev/null
+++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-LICENSE.txt
@@ -0,0 +1,202 @@
+[202 lines: standard Apache License 2.0 text, identical to opentelemetry-api-LICENSE.txt above]
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..2bd3e60a1faf6 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.26.0.jar.sha1 @@ -0,0 +1 @@ +b42359d2232f8d802d55153be5330b1d9e21ee15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..90bb8202c4c9d --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +a8abeaee240291cce9067f07569f151d11a6275a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..62396a603423f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.26.0.jar.sha1 @@ -0,0 +1 @@ +8c4af22d7d92a3a79714be3f79724b0ab774ba9e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 new file mode 100644 index 0000000000000..0fcebee353105 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.26.0.jar.sha1 @@ -0,0 +1 @@ +fcc5785b2cf2be897f31b927e24b53e46e377388 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..47c7ece8c9f6c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +1f4f963673f8209208f868666cd43e79b9a2dd15 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt b/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-NOTICE.txt b/plugins/telemetry-otel/licenses/opentelemetry-semconv-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java new file mode 100644 index 0000000000000..1c38c9dc8d6be --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/OTelTelemetryPlugin.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.TelemetryPlugin;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.telemetry.tracing.OTelResourceProvider;
+import org.opensearch.telemetry.tracing.OTelTelemetry;
+import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Telemetry plugin based on Otel
+ */
+public class OTelTelemetryPlugin extends Plugin implements TelemetryPlugin {
+
+    static final String OTEL_TRACER_NAME = "otel";
+
+    /**
+     * span exporter batch size
+     */
+    public static final Setting<Integer> TRACER_EXPORTER_BATCH_SIZE_SETTING = Setting.intSetting(
+        "telemetry.otel.tracer.exporter.batch_size",
+        512,
+        1,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+    /**
+     * span exporter max queue size
+     */
+    public static final Setting<Integer> TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING = Setting.intSetting(
+        "telemetry.otel.tracer.exporter.max_queue_size",
+        2048,
+        1,
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+    /**
+     * span exporter delay in seconds
+     */
+    public static final Setting<TimeValue> TRACER_EXPORTER_DELAY_SETTING = Setting.timeSetting(
+        "telemetry.otel.tracer.exporter.delay",
+        TimeValue.timeValueSeconds(2),
+        Setting.Property.NodeScope,
+        Setting.Property.Dynamic
+    );
+
+    private final Settings settings;
+
+    /**
+     * Creates Otel plugin
+     * @param settings cluster settings
+     */
+    public OTelTelemetryPlugin(Settings settings) {
+        this.settings = settings;
+    }
+
+    @Override
+    public List<Setting<?>> getSettings() {
+        return Arrays.asList(TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING);
+    }
+
+    @Override
+    public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
+        return Optional.of(telemetry());
+    }
+
+    @Override
+    public String getName() {
+        return OTEL_TRACER_NAME;
+    }
+
+    private Telemetry telemetry() {
+        return new OTelTelemetry(new OTelTracingTelemetry(OTelResourceProvider.get(settings)), new MetricsTelemetry() {
+        });
+    }
+
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java
new file mode 100644
index 0000000000000..4545f0ef5990e
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/package-info.java
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/**
+ * This package contains classes needed for telemetry.
+ */
+package org.opensearch.telemetry;
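The three exporter settings above are node-scoped and dynamic, so they can be supplied at startup or updated on a live cluster. A minimal sketch of setting them programmatically (the values here are arbitrary; the builder API is the standard OpenSearch `Settings` one):

```java
import org.opensearch.common.settings.Settings;

// Arbitrary example values; the defaults defined above are 512, 2048, and 2s.
Settings settings = Settings.builder()
    .put("telemetry.otel.tracer.exporter.batch_size", 256)
    .put("telemetry.otel.tracer.exporter.max_queue_size", 1024)
    .put("telemetry.otel.tracer.exporter.delay", "5s")
    .build();
```

diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java
new file mode 100644
index 0000000000000..5aa1069e60367
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelPropagatedSpan.java
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.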
+ */ + +package org.opensearch.telemetry.tracing; + +/** + * Propagated span through context propagation + */ +public class OTelPropagatedSpan extends OTelSpan { + + /** + * Creates OTelPropagatedSpan + * @param span otel propagated span + */ + public OTelPropagatedSpan(io.opentelemetry.api.trace.Span span) { + super(null, span, null); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java new file mode 100644 index 0000000000000..04bade9ec942a --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator; +import io.opentelemetry.context.propagation.ContextPropagators; +import io.opentelemetry.exporter.logging.LoggingSpanExporter; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.resources.Resource; +import io.opentelemetry.sdk.trace.SdkTracerProvider; +import io.opentelemetry.sdk.trace.export.BatchSpanProcessor; +import io.opentelemetry.sdk.trace.export.SpanExporter; +import io.opentelemetry.sdk.trace.samplers.Sampler; +import io.opentelemetry.semconv.resource.attributes.ResourceAttributes; +import org.opensearch.common.settings.Settings; + +import java.util.concurrent.TimeUnit; + +import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_BATCH_SIZE_SETTING; +import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_DELAY_SETTING; +import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING; + +/** + * This class encapsulates all OpenTelemetry related resources + */ +public final class OTelResourceProvider { + private OTelResourceProvider() {} + + /** + * Creates OpenTelemetry instance with default configuration + * @param settings cluster settings + * @return OpenTelemetry instance + */ + public static OpenTelemetry get(Settings settings) { + return get( + settings, + new LoggingSpanExporter(), + ContextPropagators.create(W3CTraceContextPropagator.getInstance()), + Sampler.alwaysOn() + ); + } + + /** + * Creates OpenTelemetry instance with provided configuration + * @param settings cluster settings + * @param spanExporter span exporter instance + * @param contextPropagators context propagator instance + * @param sampler sampler instance + * @return Opentelemetry instance + */ + public static OpenTelemetry get(Settings settings, SpanExporter spanExporter, ContextPropagators contextPropagators, Sampler sampler) { + Resource resource = Resource.create(Attributes.of(ResourceAttributes.SERVICE_NAME, "OpenSearch")); + SdkTracerProvider sdkTracerProvider = SdkTracerProvider.builder() + .addSpanProcessor(spanProcessor(settings, spanExporter)) + .setResource(resource) + .setSampler(sampler) + .build(); + + return OpenTelemetrySdk.builder().setTracerProvider(sdkTracerProvider).setPropagators(contextPropagators).buildAndRegisterGlobal(); + } + + private static BatchSpanProcessor spanProcessor(Settings settings, SpanExporter spanExporter) { + return 
BatchSpanProcessor.builder(spanExporter) + .setScheduleDelay(TRACER_EXPORTER_DELAY_SETTING.get(settings).getSeconds(), TimeUnit.SECONDS) + .setMaxExportBatchSize(TRACER_EXPORTER_BATCH_SIZE_SETTING.get(settings)) + .setMaxQueueSize(TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING.get(settings)) + .build(); + } + +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java new file mode 100644 index 0000000000000..23a2d9baa3e6e --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.trace.Span; + +/** + * Default implementation of {@link Span} using Otel span. It keeps a reference of OpenTelemetry Span and handles span + * lifecycle management by delegating calls to it. + */ +class OTelSpan extends AbstractSpan { + + private final Span delegateSpan; + + public OTelSpan(String spanName, Span span, org.opensearch.telemetry.tracing.Span parentSpan) { + super(spanName, parentSpan); + this.delegateSpan = span; + } + + @Override + public void endSpan() { + delegateSpan.end(); + } + + @Override + public void addAttribute(String key, String value) { + delegateSpan.setAttribute(key, value); + } + + @Override + public void addAttribute(String key, Long value) { + delegateSpan.setAttribute(key, value); + } + + @Override + public void addAttribute(String key, Double value) { + delegateSpan.setAttribute(key, value); + } + + @Override + public void addAttribute(String key, Boolean value) { + delegateSpan.setAttribute(key, value); + } + + @Override + public void addEvent(String event) { + delegateSpan.addEvent(event); + } + + @Override + public String getTraceId() { + return delegateSpan.getSpanContext().getTraceId(); + } + + @Override + public String getSpanId() { + return delegateSpan.getSpanContext().getSpanId(); + } + + io.opentelemetry.api.trace.Span getDelegateSpan() { + return delegateSpan; + } + +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java new file mode 100644 index 0000000000000..282fabd43346b --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTelemetry.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+
+/**
+ * Otel implementation of Telemetry
+ */
+public class OTelTelemetry implements Telemetry {
+
+    private final TracingTelemetry tracingTelemetry;
+    private final MetricsTelemetry metricsTelemetry;
+
+    /**
+     * Creates Telemetry instance
+     * @param tracingTelemetry tracing telemetry
+     * @param metricsTelemetry metrics telemetry
+     */
+    public OTelTelemetry(TracingTelemetry tracingTelemetry, MetricsTelemetry metricsTelemetry) {
+        this.tracingTelemetry = tracingTelemetry;
+        this.metricsTelemetry = metricsTelemetry;
+    }
+
+    @Override
+    public TracingTelemetry getTracingTelemetry() {
+        return tracingTelemetry;
+    }
+
+    @Override
+    public MetricsTelemetry getMetricsTelemetry() {
+        return metricsTelemetry;
+    }
+}
diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java
new file mode 100644
index 0000000000000..15609b39b6b94
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagator.java
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.context.propagation.TextMapGetter;
+import io.opentelemetry.context.propagation.TextMapSetter;
+
+import java.util.Map;
+import java.util.function.BiConsumer;
+
+/**
+ * Otel implementation of TracingContextPropagator
+ */
+public class OTelTracingContextPropagator implements TracingContextPropagator {
+
+    private final OpenTelemetry openTelemetry;
+
+    /**
+     * Creates OTelTracingContextPropagator instance
+     * @param openTelemetry Otel OpenTelemetry instance
+     */
+    public OTelTracingContextPropagator(OpenTelemetry openTelemetry) {
+        this.openTelemetry = openTelemetry;
+    }
+
+    @Override
+    public Span extract(Map<String, String> props) {
+        Context context = openTelemetry.getPropagators().getTextMapPropagator().extract(Context.current(), props, TEXT_MAP_GETTER);
+        if (context != null) {
+            io.opentelemetry.api.trace.Span span = io.opentelemetry.api.trace.Span.fromContext(context);
+            return new OTelPropagatedSpan(span);
+        }
+        return null;
+    }
+
+    @Override
+    public void inject(Span currentSpan, BiConsumer<String, String> setter) {
+        openTelemetry.getPropagators().getTextMapPropagator().inject(context((OTelSpan) currentSpan), setter, TEXT_MAP_SETTER);
+
+    }
+
+    private static Context context(OTelSpan oTelSpan) {
+        return Context.current().with(io.opentelemetry.api.trace.Span.wrap(oTelSpan.getDelegateSpan().getSpanContext()));
+    }
+
+    private static final TextMapSetter<BiConsumer<String, String>> TEXT_MAP_SETTER = (carrier, key, value) -> {
+        if (carrier != null) {
+            carrier.accept(key, value);
+        }
+    };
+
+    private static final TextMapGetter<Map<String, String>> TEXT_MAP_GETTER = new TextMapGetter<>() {
+        @Override
+        public Iterable<String> keys(Map<String, String> headers) {
+            return headers.keySet();
+        }
+
+        @Override
+        public String get(Map<String, String> headers, String key) {
+            if (headers != null && headers.containsKey(key)) {
+                return headers.get(key);
+            }
+            return null;
+        }
+    };
+
+}
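The propagator above moves span context across process boundaries through plain text headers. A minimal usage sketch, with hypothetical variables: `openTelemetry` as returned by `OTelResourceProvider.get(settings)`, and `currentSpan` an `OTelSpan` created through `OTelTracingTelemetry`:

```java
import java.util.HashMap;
import java.util.Map;

// Inject the current span into outgoing headers, then recover it on the
// receiving side. inject() casts to OTelSpan internally, so currentSpan
// is assumed to be one.
TracingContextPropagator propagator = new OTelTracingContextPropagator(openTelemetry);

Map<String, String> headers = new HashMap<>();
propagator.inject(currentSpan, headers::put);    // writes the W3C "traceparent" header

Span remoteParent = propagator.extract(headers); // rebuilt as an OTelPropagatedSpan
```

diff --git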
a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java new file mode 100644 index 0000000000000..8a0034e098461 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelTracingTelemetry.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.context.Context; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +import java.io.Closeable; +import java.io.IOException; + +/** + * OTel based Telemetry provider + */ +public class OTelTracingTelemetry implements TracingTelemetry { + + private static final Logger logger = LogManager.getLogger(OTelTracingTelemetry.class); + + private final OpenTelemetry openTelemetry; + private final io.opentelemetry.api.trace.Tracer otelTracer; + + /** + * Creates OTel based Telemetry + * @param openTelemetry OpenTelemetry instance + */ + public OTelTracingTelemetry(OpenTelemetry openTelemetry) { + this.openTelemetry = openTelemetry; + this.otelTracer = openTelemetry.getTracer("os-tracer"); + + } + + @Override + public void close() { + try { + ((Closeable) openTelemetry).close(); + } catch (IOException e) { + logger.warn("Error while closing Opentelemetry", e); + } + } + + @Override + public Span createSpan(String spanName, Span parentSpan) { + return createOtelSpan(spanName, parentSpan); + } + + @Override + public TracingContextPropagator getContextPropagator() { + return new OTelTracingContextPropagator(openTelemetry); + } + + private Span createOtelSpan(String spanName, Span parentSpan) { + io.opentelemetry.api.trace.Span otelSpan = otelSpan(spanName, parentSpan); + return new OTelSpan(spanName, otelSpan, parentSpan); + } + + io.opentelemetry.api.trace.Span otelSpan(String spanName, Span parentOTelSpan) { + return parentOTelSpan == null || !(parentOTelSpan instanceof OTelSpan) + ? otelTracer.spanBuilder(spanName).startSpan() + : otelTracer.spanBuilder(spanName).setParent(Context.current().with(((OTelSpan) parentOTelSpan).getDelegateSpan())).startSpan(); + } +} diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java new file mode 100644 index 0000000000000..4ac1e4c212c81 --- /dev/null +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests. 
+ */
+package org.opensearch.telemetry.tracing;
diff --git a/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 0000000000000..0f556121915bb
--- /dev/null
+++ b/plugins/telemetry-otel/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+grant {
+};
+
+
diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
new file mode 100644
index 0000000000000..c6ffba04ac285
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/OTelTelemetryPluginTests.java
@@ -0,0 +1,50 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry;
+
+import org.opensearch.common.settings.ClusterSettings;
+import org.opensearch.common.settings.Setting;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.telemetry.tracing.OTelTracingTelemetry;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.Optional;
+
+import static org.opensearch.telemetry.OTelTelemetryPlugin.OTEL_TRACER_NAME;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_BATCH_SIZE_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_DELAY_SETTING;
+import static org.opensearch.telemetry.OTelTelemetryPlugin.TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING;
+
+public class OTelTelemetryPluginTests extends OpenSearchTestCase {
+
+    public void testGetTelemetry() {
+        Set<Setting<?>> allTracerSettings = new HashSet<>();
+        ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach((allTracerSettings::add));
+        Settings settings = Settings.builder().build();
+        OTelTelemetryPlugin oTelTracerModulePlugin = new OTelTelemetryPlugin(settings);
+        Optional<Telemetry> tracer = oTelTracerModulePlugin.getTelemetry(null);
+
+        assertEquals(OTEL_TRACER_NAME, oTelTracerModulePlugin.getName());
+        TracingTelemetry tracingTelemetry = tracer.get().getTracingTelemetry();
+        assertTrue(tracingTelemetry instanceof OTelTracingTelemetry);
+        assertEquals(
+            Arrays.asList(TRACER_EXPORTER_BATCH_SIZE_SETTING, TRACER_EXPORTER_DELAY_SETTING, TRACER_EXPORTER_MAX_QUEUE_SIZE_SETTING),
+            oTelTracerModulePlugin.getSettings()
+        );
+        tracingTelemetry.close();
+
+    }
+
+}
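Note the gating in the test: `ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS` exposes these settings only when the telemetry feature flag is enabled. A sketch of opting a node in, mirroring how `FeatureFlags.REMOTE_STORE` is enabled later in this patch:

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.FeatureFlags;

// FeatureFlags.TELEMETRY holds the experimental flag's setting key.
Settings nodeSettings = Settings.builder().put(FeatureFlags.TELEMETRY, "true").build();
```

diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java
new file mode 100644
index 0000000000000..ac849e620673a
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelSpanTests.java
@@ -0,0 +1,92 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or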
a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.api.trace.TraceFlags; +import io.opentelemetry.api.trace.TraceState; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class OTelSpanTests extends OpenSearchTestCase { + + private static final String TRACE_ID = "4aa59968f31dcbff7807741afa9d7d62"; + private static final String SPAN_ID = "bea205cd25756b5e"; + + public void testEndSpanTest() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.endSpan(); + verify(mockSpan).end(); + } + + public void testAddAttributeString() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.addAttribute("key", "value"); + + verify(mockSpan).setAttribute("key", "value"); + } + + public void testAddAttributeLong() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.addAttribute("key", 1L); + + verify(mockSpan).setAttribute("key", 1L); + } + + public void testAddAttributeDouble() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.addAttribute("key", 1.0); + + verify(mockSpan).setAttribute("key", 1.0); + } + + public void testAddAttributeBoolean() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.addAttribute("key", true); + + verify(mockSpan).setAttribute("key", true); + } + + public void testAddEvent() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + oTelSpan.addEvent("eventName"); + + verify(mockSpan).addEvent("eventName"); + } + + public void testGetTraceId() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + + assertEquals(TRACE_ID, oTelSpan.getTraceId()); + } + + public void testGetSpanId() { + Span mockSpan = getMockSpan(); + OTelSpan oTelSpan = new OTelSpan("spanName", mockSpan, null); + + assertEquals(SPAN_ID, oTelSpan.getSpanId()); + } + + private Span getMockSpan() { + Span mockSpan = mock(Span.class); + when(mockSpan.getSpanContext()).thenReturn(SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault())); + return mockSpan; + } +} diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java new file mode 100644 index 0000000000000..1f76b0b9def18 --- /dev/null +++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingContextPropagatorTests.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.trace.Span;
+import io.opentelemetry.api.trace.SpanContext;
+import io.opentelemetry.api.trace.TraceFlags;
+import io.opentelemetry.api.trace.TraceState;
+import io.opentelemetry.api.trace.propagation.W3CTraceContextPropagator;
+import io.opentelemetry.context.propagation.ContextPropagators;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class OTelTracingContextPropagatorTests extends OpenSearchTestCase {
+
+    private static final String TRACE_ID = "4aa59968f31dcbff7807741afa9d7d62";
+    private static final String SPAN_ID = "bea205cd25756b5e";
+
+    public void testAddTracerContextToHeader() {
+        Span mockSpan = mock(Span.class);
+        when(mockSpan.getSpanContext()).thenReturn(SpanContext.create(TRACE_ID, SPAN_ID, TraceFlags.getDefault(), TraceState.getDefault()));
+        OTelSpan span = new OTelSpan("spanName", mockSpan, null);
+        Map<String, String> requestHeaders = new HashMap<>();
+        OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
+        when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance()));
+        TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry);
+
+        tracingContextPropagator.inject(span, (key, value) -> requestHeaders.put(key, value));
+        assertEquals("00-" + TRACE_ID + "-" + SPAN_ID + "-00", requestHeaders.get("traceparent"));
+    }
+
+    public void testExtractTracerContextFromHeader() {
+        Map<String, String> requestHeaders = new HashMap<>();
+        requestHeaders.put("traceparent", "00-" + TRACE_ID + "-" + SPAN_ID + "-00");
+        OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class);
+        when(mockOpenTelemetry.getPropagators()).thenReturn(ContextPropagators.create(W3CTraceContextPropagator.getInstance()));
+        TracingContextPropagator tracingContextPropagator = new OTelTracingContextPropagator(mockOpenTelemetry);
+        org.opensearch.telemetry.tracing.Span span = tracingContextPropagator.extract(requestHeaders);
+        assertEquals(TRACE_ID, span.getTraceId());
+        assertEquals(SPAN_ID, span.getSpanId());
+    }
+}
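The header value asserted in both tests follows the W3C Trace Context `traceparent` layout: version, trace id, parent span id, and trace flags, joined by hyphens. Purely as an illustration of what the propagator writes:

```java
// "00" version, 32-hex-char trace id, 16-hex-char span id, "00" flags (not sampled).
String traceparent = String.join("-", "00", TRACE_ID, SPAN_ID, "00");
```

diff --git a/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
new file mode 100644
index 0000000000000..7dec7824b9790
--- /dev/null
+++ b/plugins/telemetry-otel/src/test/java/org/opensearch/telemetry/tracing/OTelTracingTelemetryTests.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.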
+ */ + +package org.opensearch.telemetry.tracing; + +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.trace.SpanBuilder; +import io.opentelemetry.api.trace.Tracer; +import org.opensearch.test.OpenSearchTestCase; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class OTelTracingTelemetryTests extends OpenSearchTestCase { + + public void testCreateSpanWithoutParent() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); + when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + Span span = tracingTelemetry.createSpan("span_name", null); + + verify(mockSpanBuilder, never()).setParent(any()); + assertNull(span.getParentSpan()); + } + + public void testCreateSpanWithParent() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + SpanBuilder mockSpanBuilder = mock(SpanBuilder.class); + when(mockTracer.spanBuilder("span_name")).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.setParent(any())).thenReturn(mockSpanBuilder); + when(mockSpanBuilder.startSpan()).thenReturn(mock(io.opentelemetry.api.trace.Span.class)); + + Span parentSpan = new OTelSpan("parent_span", mock(io.opentelemetry.api.trace.Span.class), null); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + Span span = tracingTelemetry.createSpan("span_name", parentSpan); + + verify(mockSpanBuilder).setParent(any()); + assertNotNull(span.getParentSpan()); + assertEquals("parent_span", span.getParentSpan().getSpanName()); + } + + public void testGetContextPropagator() { + OpenTelemetry mockOpenTelemetry = mock(OpenTelemetry.class); + Tracer mockTracer = mock(Tracer.class); + when(mockOpenTelemetry.getTracer("os-tracer")).thenReturn(mockTracer); + + TracingTelemetry tracingTelemetry = new OTelTracingTelemetry(mockOpenTelemetry); + + assertTrue(tracingTelemetry.getContextPropagator() instanceof OTelTracingContextPropagator); + } + +} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index 173aa9f6557d2..93c0bc96a5183 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -48,16 +48,12 @@ import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.either; /** * Basic test that indexed documents survive the rolling restart. 
See
@@ -88,52 +84,51 @@ private void printClusterNodes() throws IOException, ParseException, URISyntaxEx
     }
 
     // Verifies that for each shard copy holds same document count across all containing nodes.
-    private void waitForSearchableDocs(String index, int shardCount) throws Exception {
-        Map<Integer, String> primaryShardToNodeIDMap = new HashMap<>();
-        Map<Integer, String> replicaShardToNodeIDMap = new HashMap<>();
+    private void waitForSearchableDocs(String index, int shardCount, int replicaCount) throws Exception {
+        assertTrue(shardCount > 0);
+        assertTrue(replicaCount > 0);
         waitForClusterHealthWithNoShardMigration(index, "green");
         logger.info("--> _cat/shards before search \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity()));
+        // Verify segment replication stats
         verifySegmentStats(index);
-        Request request = new Request("GET", index + "/_stats");
-        request.addParameter("level", "shards");
-        Response response = client().performRequest(request);
-        for (int shardNumber = 0; shardNumber < shardCount; shardNumber++) {
-            List<Object> shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards." + shardNumber);
-            for (Object shard : shardStats) {
-                final String nodeId = ObjectPath.evaluate(shard, "routing.node");
-                final Boolean primary = ObjectPath.evaluate(shard, "routing.primary");
-                if (primary) {
-                    primaryShardToNodeIDMap.putIfAbsent(shardNumber, nodeId);
-                } else {
-                    replicaShardToNodeIDMap.putIfAbsent(shardNumber, nodeId);
+
+        // Verify segment store
+        assertBusy(() -> {
+            /**
+             * Use default tabular output and sort the response on the shard,segment,primaryOrReplica columns to allow
+             * line-by-line parsing where records related to a segment (e.g. _0) are chunked together, with the first
+             * record belonging to the primary while the remaining *replicaCount* records belong to replica copies
+             * */
+            Request segrepStatsRequest = new Request("GET", "/_cat/segments/" + index + "?s=shard,segment,primaryOrReplica");
+            segrepStatsRequest.addParameter("h", "index,shard,primaryOrReplica,segment,docs.count");
+            Response segrepStatsResponse = client().performRequest(segrepStatsRequest);
+            logger.info("--> _cat/segments response\n {}", EntityUtils.toString(segrepStatsResponse.getEntity()));
+            List<String> responseList = Streams.readAllLines(segrepStatsResponse.getEntity().getContent());
+            for (int segmentsIndex = 0; segmentsIndex < responseList.size();) {
+                String[] primaryRow = responseList.get(segmentsIndex++).split(" +");
+                String shardId = primaryRow[0] + primaryRow[1];
+                assertTrue(primaryRow[2].equals("p"));
+                for (int replicaIndex = 1; replicaIndex <= replicaCount; replicaIndex++) {
+                    String[] replicaRow = responseList.get(segmentsIndex).split(" +");
+                    String replicaShardId = replicaRow[0] + replicaRow[1];
+                    // When a segment has 0 doc count, not all replica copies possess that segment. Skip to next segment
+                    if (replicaRow[2].equals("p")) {
+                        assertTrue(primaryRow[4].equals("0"));
+                        break;
+                    }
+                    // verify same shard id
+                    assertTrue(replicaShardId.equals(shardId));
+                    // verify replica row
+                    assertTrue(replicaRow[2].equals("r"));
+                    // Verify segment name matches e.g.
_0 + assertTrue(replicaRow[3].equals(primaryRow[3])); + // Verify doc count matches + assertTrue(replicaRow[4].equals(primaryRow[4])); + segmentsIndex++; } } - } - logger.info("--> primaryShardToNodeIDMap {}", primaryShardToNodeIDMap); - logger.info("--> replicaShardToNodeIDMap {}", replicaShardToNodeIDMap); - - for (int shardNumber = 0; shardNumber < shardCount; shardNumber++) { - Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); - searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - searchTestIndexRequest.addParameter("filter_path", "hits.total"); - searchTestIndexRequest.addParameter("preference", "_shards:" + shardNumber + "|_only_nodes:" + primaryShardToNodeIDMap.get(shardNumber)); - Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest); - final int primaryHits = ObjectPath.createFromResponse(searchTestIndexResponse).evaluate("hits.total"); - final int shardNum = shardNumber; - // Verify replica shard doc count only when available. - if (replicaShardToNodeIDMap.get(shardNum) != null) { - assertBusy(() -> { - Request replicaRequest = new Request("POST", "/" + index + "/_search"); - replicaRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - replicaRequest.addParameter("filter_path", "hits.total"); - replicaRequest.addParameter("preference", "_shards:" + shardNum + "|_only_nodes:" + replicaShardToNodeIDMap.get(shardNum)); - Response replicaResponse = client().performRequest(replicaRequest); - int replicaHits = ObjectPath.createFromResponse(replicaResponse).evaluate("hits.total"); - assertEquals("Doc count mismatch for shard " + shardNum + ". Primary hits " + primaryHits + " Replica hits " + replicaHits, primaryHits, replicaHits); - }, 1, TimeUnit.MINUTES); - } - } + }, 1, TimeUnit.MINUTES); } private void waitForClusterHealthWithNoShardMigration(String indexName, String status) throws IOException { @@ -156,7 +151,7 @@ private void verifySegmentStats(String indexName) throws Exception { String[] elements = statLine.split(" +"); assertEquals("Replica shard " + elements[0] + "not upto date with primary ", 0, Integer.parseInt(elements[2])); } - }); + }, 1, TimeUnit.MINUTES); } public void testIndexing() throws IOException, ParseException { @@ -307,7 +302,7 @@ public void testIndexingWithSegRep() throws Exception { throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); } - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount); if (CLUSTER_TYPE != ClusterType.OLD) { @@ -318,17 +313,16 @@ public void testIndexingWithSegRep() throws Exception { toBeDeleted.addParameter("refresh", "true"); toBeDeleted.setJsonEntity("{\"f1\": \"delete-me\"}"); client().performRequest(toBeDeleted); - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount + 6); logger.info("--> Delete previously added doc and verify doc count"); Request delete = new Request("DELETE", "/" + indexName + "/_doc/to_be_deleted"); delete.addParameter("refresh", "true"); client().performRequest(delete); - waitForSearchableDocs(indexName, shardCount); + waitForSearchableDocs(indexName, shardCount, replicaCount); assertCount(indexName, expectedCount + 5); } - logger.info("--> _cat/shards post execution \n{}", EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/shards?v")).getEntity())); } public void testAutoIdWithOpTypeCreate() 
throws IOException { diff --git a/release-notes/opensearch.release-notes-1.3.11.md b/release-notes/opensearch.release-notes-1.3.11.md new file mode 100644 index 0000000000000..b2589d94b4f57 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.11.md @@ -0,0 +1,13 @@ +## 2023-06-27 Version 1.3.11 Release Notes + +### Upgrades +- Upgrade `netty` from 4.1.91.Final to 4.1.93.Final ([#7901](https://github.com/opensearch-project/OpenSearch/pull/7901)) +- Upgrade `spock-core` from 1.3-groovy-2.5 to 2.3-groovy-2.5 ([#8119](https://github.com/opensearch-project/OpenSearch/pull/8119)) +- Upgrade `com.google.guava:guava` from 31.0.1-jre to 32.0.1-jre ([#8107](https://github.com/opensearch-project/OpenSearch/pull/8107)) +- Upgrade versions of gradle-info-plugin and nebula-publishing-plugin ([#8150](https://github.com/opensearch-project/OpenSearch/pull/8150)) +- Upgrade `json-smart` from 2.4.7 to 2.4.11 in plugins/repository-hdfs/ ([#8160](https://github.com/opensearch-project/OpenSearch/pull/8160)) +- Upgrade `netty` from 4.1.93.Final to 4.1.94.Final ([#8191](https://github.com/opensearch-project/OpenSearch/pull/8191)) + +### Bug Fixes +- Fix mapping char_filter when mapping a hashtag ([#7591](https://github.com/opensearch-project/OpenSearch/pull/7591)) +- Force nebula plugins to use latest org.bouncycastle:* artifacts ([#8233](https://github.com/opensearch-project/OpenSearch/pull/8233)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json index 87ab8117ec489..07148c7d261f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/snapshot.restore.json @@ -42,6 +42,10 @@ "type":"boolean", "description":"Should this request wait until the operation has completed before returning", "default":false + }, + "source_remote_store_repository": { + "type":"string", + "description":"Remote Store Repository of Remote Store Indices" } }, "body":{ diff --git a/server/build.gradle b/server/build.gradle index 2738e4351a109..ab67eabe76d0c 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -102,6 +102,8 @@ dependencies { api project(':libs:opensearch-secure-sm') api project(':libs:opensearch-x-content') api project(":libs:opensearch-geo") + api project(":libs:opensearch-telemetry") + compileOnly project(':libs:opensearch-plugin-classloader') testRuntimeOnly project(':libs:opensearch-plugin-classloader') diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index ad6e358cb9da1..01fb91f83aa02 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -63,4 +63,10 @@ public void setup() { public void teardown() { assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7592") + @Override + public void testPressureServiceStats() throws Exception { + super.testPressureServiceStats(); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 
8be14d1188db8..9f492bbaee01a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -33,29 +33,44 @@ package org.opensearch.snapshots; import org.opensearch.action.ActionFuture; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; +import org.opensearch.client.Requests; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.index.IndexSettings; import org.opensearch.indices.InvalidIndexNameException; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestStatus; +import org.opensearch.test.InternalTestCluster; +import java.io.IOException; import java.nio.file.Path; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -70,6 +85,8 @@ import static org.hamcrest.Matchers.nullValue; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchQuery; @@ -81,6 +98,10 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertRequestBuilderThrows; public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase { + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(FeatureFlags.REMOTE_STORE, "true").build(); + } public void testParallelRestoreOperations() { String indexName1 = "testindex1"; @@ -152,6 +173,498 @@ public void testParallelRestoreOperations() { 
assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } + public void testRestoreRemoteStoreIndicesWithRemoteTranslog() throws IOException, ExecutionException, InterruptedException { + testRestoreOperationsShallowCopyEnabled(true); + } + + public void testRestoreRemoteStoreIndicesWithoutRemoteTranslog() throws IOException, ExecutionException, InterruptedException { + testRestoreOperationsShallowCopyEnabled(false); + } + + public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnabled) throws IOException, ExecutionException, + InterruptedException { + internalCluster().startClusterManagerOnlyNode(); + final String primaryNode = internalCluster().startNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + logger.info("Remote Store Repo Path [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + String restoredIndexName1Seg = indexName1 + "-restored-seg"; + String restoredIndexName1Doc = indexName1 + "-restored-doc"; + String restoredIndexName2 = indexName2 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + Client client = client(); + Settings indexSettings = getIndexSettings(true, remoteTranslogEnabled, remoteStoreRepoName, 1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + final String secondNode = internalCluster().startNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse2.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, 
"0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1, restoredIndexName2); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(restoredIndexName1))); + ensureRed(restoredIndexName1); + assertAcked(client().admin().indices().prepareClose(restoredIndexName1)); + client().admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + + // restore index as seg rep enabled with remote store and remote translog disabled + RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Seg) + .get(); + assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Seg); + + GetIndexResponse getIndexResponse = client().admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY, null)); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Seg); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); + + // restore index as doc rep based from shallow copy snapshot + RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() + .cluster() + 
.prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings( + IndexMetadata.SETTING_REMOTE_STORE_ENABLED, + IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, + IndexMetadata.SETTING_REPLICATION_TYPE + ) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Doc) + .get(); + assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Doc); + + getIndexResponse = client().admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_REPOSITORY, null)); + assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Doc); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); + } + + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + logger.info("Remote Store Repo Path [{}]", absolutePath2); + String restoredIndexName2 = indexName2 + "-restored"; + + boolean enableShallowCopy = randomBoolean(); + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + Client client = client(); + Settings indexSettings = getIndexSettings(true, randomBoolean(), remoteStoreRepoName, 1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + final String secondNode = internalCluster().startNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + 
.prepareCreateSnapshot(snapshotRepoName, snapshotName2)
+ .setWaitForCompletion(true)
+ .setIndices(indexName1, indexName2)
+ .get();
+ assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(
+ createSnapshotResponse2.getSnapshotInfo().successfulShards(),
+ equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards())
+ );
+ assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
+
+ DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet();
+ assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
+ indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5));
+ ensureGreen(indexName1);
+
+ assertAcked(client().admin().indices().prepareClose(indexName1));
+
+ RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(snapshotRepoName, snapshotName1)
+ .setWaitForCompletion(false)
+ .setIndices(indexName1)
+ .get();
+ RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin()
+ .cluster()
+ .prepareRestoreSnapshot(snapshotRepoName, snapshotName2)
+ .setWaitForCompletion(false)
+ .setIndices(indexName2)
+ .setRenamePattern(indexName2)
+ .setRenameReplacement(restoredIndexName2)
+ .get();
+ assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED);
+ assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED);
+ ensureGreen(indexName1, restoredIndexName2);
+ assertDocsPresentInIndex(client, indexName1, numDocsInIndex1);
+ assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2);
+
+ // deleting data for indexName1 and restoring from remote store.
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1)));
+ ensureRed(indexName1);
+ assertAcked(client().admin().indices().prepareClose(indexName1));
+ client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1), PlainActionFuture.newFuture());
+ ensureYellowAndNoInitializingShards(indexName1);
+ ensureGreen(indexName1);
+ assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1);
+ // indexing some new docs and validating
+ indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2);
+ ensureGreen(indexName1);
+ assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2);
+ }
+
+ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException {
+ internalCluster().startClusterManagerOnlyNode();
+ final String primaryNode = internalCluster().startNode();
+ String indexName1 = "testindex1";
+ String indexName2 = "testindex2";
+ String snapshotRepoName = "test-restore-snapshot-repo";
+ String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX;
+ String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX;
+ String snapshotName1 = "test-restore-snapshot1";
+ Path absolutePath1 = randomRepoPath().toAbsolutePath();
+ Path absolutePath2 = randomRepoPath().toAbsolutePath();
+ Path absolutePath3 = randomRepoPath().toAbsolutePath();
+ String restoredIndexName1 = indexName1 + "-restored";
+
+ createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false));
+ createRepository(remoteStoreRepoName, "fs", absolutePath2);
+ createRepository(remoteStoreRepo2Name, "fs", absolutePath3);
+
+ Client client = client();
+ Settings indexSettings = getIndexSettings(true, true, remoteStoreRepoName, 1, 0).build();
+
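// indexName1 is backed by the remote store (segments and translog); indexName2 below is a regular local index
+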
createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(false, false, null, 1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + final String secondNode = internalCluster().startNode(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepo2Name) + .build(); + // restore index as a remote store index with different remote store repo + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + + // deleting data for restoredIndexName1 and restoring from remote store. 
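+ // stop the primary node, close the index, and restore its shard data directly from the remote store (not from the snapshot)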
+ internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(restoredIndexName1)));
+ assertAcked(client().admin().indices().prepareClose(restoredIndexName1));
+ client().admin()
+ .cluster()
+ .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture());
+ ensureYellowAndNoInitializingShards(restoredIndexName1);
+ ensureGreen(restoredIndexName1);
+ // indexing some new docs and validating
+ assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1);
+ indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2);
+ ensureGreen(restoredIndexName1);
+ assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2);
+ }
+
+ private Settings.Builder getIndexSettings(
+ boolean enableRemoteStore,
+ boolean enableRemoteTranslog,
+ String remoteStoreRepo,
+ int numOfShards,
+ int numOfReplicas
+ ) {
+ Settings.Builder settingsBuilder = Settings.builder()
+ .put(super.indexSettings())
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards)
+ .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas);
+ if (enableRemoteStore) {
+ settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+ .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepo)
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s")
+ .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT);
+ }
+ if (enableRemoteTranslog) {
+ settingsBuilder.put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_ENABLED, true)
+ .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo);
+ }
+ return settingsBuilder;
+ }
+
+ public void testRestoreShallowSnapshotRepositoryOverridden() throws ExecutionException, InterruptedException {
+ String indexName1 = "testindex1";
+ String snapshotRepoName = "test-restore-snapshot-repo";
+ String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX;
+ String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX;
+ String snapshotName1 = "test-restore-snapshot1";
+ Path absolutePath1 = randomRepoPath().toAbsolutePath();
+ Path absolutePath2 = randomRepoPath().toAbsolutePath();
+ Path absolutePath3 = randomRepoPath().toAbsolutePath();
+ String[] pathTokens = absolutePath1.toString().split("/");
+ String basePath = pathTokens[pathTokens.length - 1];
+ pathTokens = Arrays.copyOf(pathTokens, pathTokens.length - 1);
+ Path location = PathUtils.get(String.join("/", pathTokens));
+ pathTokens = absolutePath2.toString().split("/");
+ String basePath2 = pathTokens[pathTokens.length - 1];
+ pathTokens = Arrays.copyOf(pathTokens, pathTokens.length - 1);
+ Path location2 = PathUtils.get(String.join("/", pathTokens));
+ logger.info("Path 1 [{}]", absolutePath1);
+ logger.info("Path 2 [{}]", absolutePath2);
+ logger.info("Path 3 [{}]", absolutePath3);
+ String restoredIndexName1 = indexName1 + "-restored";
+
+ createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true));
+ createRepository(remoteStoreRepoName, "fs", absolutePath3);
+
+ Client client = client();
+ Settings indexSettings = Settings.builder()
+ .put(super.indexSettings())
+ .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+ .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepoName)
+ .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s")
+ .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+ .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + createRepository(remoteStoreRepoName, "fs", absolutePath2); + + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + + assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); + + ensureRed(restoredIndexName1); + + client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + createRepository(remoteStoreRepoNameUpdated, "fs", absolutePath3); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + private void indexDocuments(Client client, String indexName, int numOfDocs) { + indexDocuments(client, indexName, 0, numOfDocs); + } + + private void indexDocuments(Client client, String indexName, int fromId, int toId) { + for (int i = fromId; i < toId; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { + for (int i = 0; i < numOfDocs; i++) { + String id = Integer.toString(i); + logger.info("checking for index " + indexName + " with docId" + id); + assertTrue("doc with id" + id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); + } + } + public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index c183562e2e85a..6b8e06594acb7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -59,6 +59,7 @@ import org.opensearch.script.ScriptCacheStats; import org.opensearch.script.ScriptStats; import org.opensearch.search.backpressure.stats.SearchBackpressureStats; +import org.opensearch.search.pipeline.SearchPipelineStats; import org.opensearch.tasks.TaskCancellationStats; import org.opensearch.threadpool.ThreadPoolStats; import org.opensearch.transport.TransportStats; @@ -138,6 +139,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment { @Nullable private TaskCancellationStats taskCancellationStats; + @Nullable + private SearchPipelineStats searchPipelineStats; + public NodeStats(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); @@ -189,6 +193,11 @@ public NodeStats(StreamInput in) throws IOException { } else { taskCancellationStats = null; } + if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO Update to 2_9_0 when we backport to 2.x + searchPipelineStats = in.readOptionalWriteable(SearchPipelineStats::new); + } else { + searchPipelineStats = null; + } } public NodeStats( @@ -214,7 +223,8 @@ public NodeStats( @Nullable ClusterManagerThrottlingStats clusterManagerThrottlingStats, @Nullable WeightedRoutingStats weightedRoutingStats, @Nullable FileCacheStats fileCacheStats, - @Nullable TaskCancellationStats taskCancellationStats + @Nullable TaskCancellationStats taskCancellationStats, + @Nullable SearchPipelineStats searchPipelineStats ) { super(node); this.timestamp = timestamp; @@ -239,6 +249,7 @@ public NodeStats( this.weightedRoutingStats = weightedRoutingStats; this.fileCacheStats = fileCacheStats; this.taskCancellationStats = taskCancellationStats; + this.searchPipelineStats = searchPipelineStats; } public long getTimestamp() { @@ -371,6 +382,11 @@ public TaskCancellationStats getTaskCancellationStats() { return taskCancellationStats; } + @Nullable + public SearchPipelineStats getSearchPipelineStats() { + return searchPipelineStats; + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); @@ -411,6 +427,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(taskCancellationStats); } + if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO: Update to 2_9_0 once we backport to 2.x + out.writeOptionalWriteable(searchPipelineStats); + } } @Override @@ -498,6 +517,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (getTaskCancellationStats() != null) { getTaskCancellationStats().toXContent(builder, params); } + if (getSearchPipelineStats() != null) { + getSearchPipelineStats().toXContent(builder, params); + } return builder; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java index 68f391b91507c..f37a837c6f0ef 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -211,7 +211,8 @@ public enum Metric { CLUSTER_MANAGER_THROTTLING("cluster_manager_throttling"), WEIGHTED_ROUTING_STATS("weighted_routing"), FILE_CACHE_STATS("file_cache"), - TASK_CANCELLATION("task_cancellation"); + TASK_CANCELLATION("task_cancellation"), + SEARCH_PIPELINE("search_pipeline"); private String 
metricName; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 6aadf546d30f7..660142f05bab2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -123,7 +123,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { NodesStatsRequest.Metric.CLUSTER_MANAGER_THROTTLING.containedIn(metrics), NodesStatsRequest.Metric.WEIGHTED_ROUTING_STATS.containedIn(metrics), NodesStatsRequest.Metric.FILE_CACHE_STATS.containedIn(metrics), - NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics) + NodesStatsRequest.Metric.TASK_CANCELLATION.containedIn(metrics), + NodesStatsRequest.Metric.SEARCH_PIPELINE.containedIn(metrics) ); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index e127b44116b7e..9ab66d726854e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -113,6 +113,8 @@ private static StorageType fromString(String string) { private Settings indexSettings = EMPTY_SETTINGS; private String[] ignoreIndexSettings = Strings.EMPTY_ARRAY; private StorageType storageType = StorageType.LOCAL; + @Nullable + private String sourceRemoteStoreRepository = null; @Nullable // if any snapshot UUID will do private String snapshotUuid; @@ -148,6 +150,9 @@ public RestoreSnapshotRequest(StreamInput in) throws IOException { if (in.getVersion().onOrAfter(Version.V_2_7_0)) { storageType = in.readEnum(StorageType.class); } + if (in.getVersion().onOrAfter(Version.V_2_9_0)) { + sourceRemoteStoreRepository = in.readOptionalString(); + } } @Override @@ -169,6 +174,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_7_0)) { out.writeEnum(storageType); } + if (out.getVersion().onOrAfter(Version.V_2_9_0)) { + out.writeOptionalString(sourceRemoteStoreRepository); + } } @Override @@ -521,6 +529,25 @@ public StorageType storageType() { return storageType; } + /** + * Sets Source Remote Store Repository for all the restored indices + * + * @param sourceRemoteStoreRepository name of the remote store repository that should be used for all restored indices. 
+ */ + public RestoreSnapshotRequest setSourceRemoteStoreRepository(String sourceRemoteStoreRepository) { + this.sourceRemoteStoreRepository = sourceRemoteStoreRepository; + return this; + } + + /** + * Returns Source Remote Store Repository for all the restored indices + * + * @return source Remote Store Repository + */ + public String getSourceRemoteStoreRepository() { + return sourceRemoteStoreRepository; + } + /** * Parses restore definition * @@ -586,6 +613,12 @@ public RestoreSnapshotRequest source(Map source) { throw new IllegalArgumentException("malformed storage_type"); } + } else if (name.equals("source_remote_store_repository")) { + if (entry.getValue() instanceof String) { + setSourceRemoteStoreRepository((String) entry.getValue()); + } else { + throw new IllegalArgumentException("malformed source_remote_store_repository"); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -631,6 +664,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (storageType != null) { storageType.toXContent(builder); } + if (sourceRemoteStoreRepository != null) { + builder.field("source_remote_store_repository", sourceRemoteStoreRepository); + } builder.endObject(); return builder; } @@ -658,7 +694,8 @@ public boolean equals(Object o) { && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) && Objects.equals(snapshotUuid, that.snapshotUuid) - && Objects.equals(storageType, that.storageType); + && Objects.equals(storageType, that.storageType) + && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository); } @Override @@ -675,7 +712,8 @@ public int hashCode() { includeAliases, indexSettings, snapshotUuid, - storageType + storageType, + sourceRemoteStoreRepository ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 0104637a00035..d9cca536d1c41 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -256,4 +256,12 @@ public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.Stora request.storageType(storageType); return this; } + + /** + * Sets the source remote store repository name + */ + public RestoreSnapshotRequestBuilder setSourceRemoteStoreRepository(String repositoryName) { + request.setSourceRemoteStoreRepository(repositoryName); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index 828f3a2e5e842..25a915833c7e2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -40,6 +40,7 @@ import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.metrics.OperationStats; import 
org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; @@ -800,18 +801,18 @@ static class IngestStats implements ToXContentFragment { pipelineIds.add(processorStats.getKey()); for (org.opensearch.ingest.IngestStats.ProcessorStat stat : processorStats.getValue()) { stats.compute(stat.getType(), (k, v) -> { - org.opensearch.ingest.IngestStats.Stats nodeIngestStats = stat.getStats(); + OperationStats nodeIngestStats = stat.getStats(); if (v == null) { return new long[] { - nodeIngestStats.getIngestCount(), - nodeIngestStats.getIngestFailedCount(), - nodeIngestStats.getIngestCurrent(), - nodeIngestStats.getIngestTimeInMillis() }; + nodeIngestStats.getCount(), + nodeIngestStats.getFailedCount(), + nodeIngestStats.getCurrent(), + nodeIngestStats.getTotalTimeInMillis() }; } else { - v[0] += nodeIngestStats.getIngestCount(); - v[1] += nodeIngestStats.getIngestFailedCount(); - v[2] += nodeIngestStats.getIngestCurrent(); - v[3] += nodeIngestStats.getIngestTimeInMillis(); + v[0] += nodeIngestStats.getCount(); + v[1] += nodeIngestStats.getFailedCount(); + v[2] += nodeIngestStats.getCurrent(); + v[3] += nodeIngestStats.getTotalTimeInMillis(); return v; } }); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 726f8a0de19ae..aee6dfddd203e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -167,6 +167,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq false, false, false, + false, false ); List shardsStats = new ArrayList<>(); diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index ef86eb31e2817..41e06ac2a8b24 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -257,9 +258,11 @@ public static class SnapshotRecoverySource extends RecoverySource { private final IndexId index; private final Version version; private final boolean isSearchableSnapshot; + private final boolean remoteStoreIndexShallowCopy; + private final String sourceRemoteStoreRepository; public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { - this(restoreUUID, snapshot, version, indexId, false); + this(restoreUUID, snapshot, version, indexId, false, false, null); } public SnapshotRecoverySource( @@ -267,13 +270,17 @@ public SnapshotRecoverySource( Snapshot snapshot, Version version, IndexId indexId, - boolean isSearchableSnapshot + boolean isSearchableSnapshot, + boolean remoteStoreIndexShallowCopy, + @Nullable String sourceRemoteStoreRepository ) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); this.index = Objects.requireNonNull(indexId); 
this.isSearchableSnapshot = isSearchableSnapshot;
+ this.remoteStoreIndexShallowCopy = remoteStoreIndexShallowCopy;
+ this.sourceRemoteStoreRepository = sourceRemoteStoreRepository;
}
SnapshotRecoverySource(StreamInput in) throws IOException {
@@ -286,6 +293,13 @@ public SnapshotRecoverySource(
} else {
isSearchableSnapshot = false;
}
+ if (in.getVersion().onOrAfter(Version.V_2_9_0)) {
+ remoteStoreIndexShallowCopy = in.readBoolean();
+ sourceRemoteStoreRepository = in.readOptionalString();
+ } else {
+ remoteStoreIndexShallowCopy = false;
+ sourceRemoteStoreRepository = null;
+ }
}
public String restoreUUID() {
@@ -314,6 +328,14 @@ public boolean isSearchableSnapshot() {
return isSearchableSnapshot;
}
+ public String sourceRemoteStoreRepository() {
+ return sourceRemoteStoreRepository;
+ }
+
+ public boolean remoteStoreIndexShallowCopy() {
+ return remoteStoreIndexShallowCopy;
+ }
+
@Override
protected void writeAdditionalFields(StreamOutput out) throws IOException {
out.writeString(restoreUUID);
@@ -323,6 +345,10 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException {
if (out.getVersion().onOrAfter(Version.V_2_7_0)) {
out.writeBoolean(isSearchableSnapshot);
}
+ if (out.getVersion().onOrAfter(Version.V_2_9_0)) {
+ out.writeBoolean(remoteStoreIndexShallowCopy);
+ out.writeOptionalString(sourceRemoteStoreRepository);
+ }
}
@Override
@@ -337,7 +363,9 @@ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params param
.field("version", version.toString())
.field("index", index.getName())
.field("restoreUUID", restoreUUID)
- .field("isSearchableSnapshot", isSearchableSnapshot);
+ .field("isSearchableSnapshot", isSearchableSnapshot)
+ .field("remoteStoreIndexShallowCopy", remoteStoreIndexShallowCopy)
+ .field("sourceRemoteStoreRepository", sourceRemoteStoreRepository);
}
@Override
@@ -359,12 +387,24 @@ public boolean equals(Object o) {
&& snapshot.equals(that.snapshot)
&& index.equals(that.index)
&& version.equals(that.version)
- && isSearchableSnapshot == that.isSearchableSnapshot;
+ && isSearchableSnapshot == that.isSearchableSnapshot
+ && remoteStoreIndexShallowCopy == that.remoteStoreIndexShallowCopy
+ && Objects.equals(sourceRemoteStoreRepository, that.sourceRemoteStoreRepository);
}
@Override
public int hashCode() {
- return Objects.hash(restoreUUID, snapshot, index, version, isSearchableSnapshot);
+ return Objects.hash(
+ restoreUUID,
+ snapshot,
+ index,
+ version,
+ isSearchableSnapshot,
+ remoteStoreIndexShallowCopy,
+ sourceRemoteStoreRepository
+ );
}
}
diff --git a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java
index 79c04d431e97b..33f12c8cb42d3 100644
--- a/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java
+++ b/server/src/main/java/org/opensearch/common/metrics/MeanMetric.java
@@ -49,6 +49,11 @@ public void inc(long n) {
sum.add(n);
}
+ public void add(MeanMetric other) {
+ counter.add(other.counter.sum());
+ sum.add(other.sum.sum());
+ }
+
public void dec(long n) {
counter.decrement();
sum.add(-n);
diff --git a/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java b/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java
new file mode 100644
index 0000000000000..97fbbc2ce5cde
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/metrics/OperationMetrics.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.metrics;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Mutable tracker of a repeated operation.
+ *
+ * @opensearch.internal
+ */
+public class OperationMetrics {
+ /**
+ * The mean time it takes to complete the measured item.
+ */
+ private final MeanMetric time = new MeanMetric();
+ /**
+ * The current count of things being measured.
+ * Useful when aggregating multiple metrics to see how many things are in flight.
+ */
+ private final AtomicLong current = new AtomicLong();
+ /**
+ * The non-decreasing count of failures
+ */
+ private final CounterMetric failed = new CounterMetric();
+
+ /**
+ * Invoked before the given operation begins.
+ */
+ public void before() {
+ current.incrementAndGet();
+ }
+
+ /**
+ * Invoked upon completion (success or failure) of the given operation
+ * @param currentTime elapsed time of the operation
+ */
+ public void after(long currentTime) {
+ current.decrementAndGet();
+ time.inc(currentTime);
+ }
+
+ /**
+ * Invoked upon failure of the operation.
+ */
+ public void failed() {
+ failed.inc();
+ }
+
+ public void add(OperationMetrics other) {
+ // Don't try copying over current, since in-flight requests will be linked to the existing metrics instance.
+ failed.inc(other.failed.count());
+ time.add(other.time);
+ }
+
+ /**
+ * @return an immutable snapshot of the current metric values.
+ */
+ public OperationStats createStats() {
+ return new OperationStats(time.count(), time.sum(), current.get(), failed.count());
+ }
+}
diff --git a/server/src/main/java/org/opensearch/common/metrics/OperationStats.java b/server/src/main/java/org/opensearch/common/metrics/OperationStats.java
new file mode 100644
index 0000000000000..a820f848393bb
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/metrics/OperationStats.java
@@ -0,0 +1,107 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.metrics;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.common.io.stream.Writeable;
+import org.opensearch.common.unit.TimeValue;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An immutable representation of an {@link OperationMetrics}
+ */
+public class OperationStats implements Writeable, ToXContentFragment {
+ private final long count;
+ private final long totalTimeInMillis;
+ private final long current;
+ private final long failedCount;
+
+ public OperationStats(long count, long totalTimeInMillis, long current, long failedCount) {
+ this.count = count;
+ this.totalTimeInMillis = totalTimeInMillis;
+ this.current = current;
+ this.failedCount = failedCount;
+ }
+
+ /**
+ * Read from a stream.
+ */
+ public OperationStats(StreamInput in) throws IOException {
+ count = in.readVLong();
+ totalTimeInMillis = in.readVLong();
+ current = in.readVLong();
+ failedCount = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(count);
+ out.writeVLong(totalTimeInMillis);
+ out.writeVLong(current);
+ out.writeVLong(failedCount);
+ }
+
+ /**
+ * @return The total number of executed operations.
+ */
+ public long getCount() {
+ return count;
+ }
+
+ /**
+ * @return The total time spent, in millis.
+ */
+ public long getTotalTimeInMillis() {
+ return totalTimeInMillis;
+ }
+
+ /**
+ * @return The total number of operations currently executing.
+ */
+ public long getCurrent() {
+ return current;
+ }
+
+ /**
+ * @return The total number of operations that have failed.
+ */ + public long getFailedCount() { + return failedCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.field("count", count) + .humanReadableField("time_in_millis", "time", new TimeValue(totalTimeInMillis, TimeUnit.MILLISECONDS)) + .field("current", current) + .field("failed", failedCount); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + OperationStats that = (OperationStats) o; + return Objects.equals(count, that.count) + && Objects.equals(totalTimeInMillis, that.totalTimeInMillis) + && Objects.equals(failedCount, that.failedCount) + && Objects.equals(current, that.current); + } + + @Override + public int hashCode() { + return Objects.hash(count, totalTimeInMillis, failedCount, current); + } +} diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index fe1d292dbd8f6..d70ea16cf5fdd 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -52,6 +52,7 @@ import org.opensearch.tasks.TaskManager; import org.opensearch.tasks.TaskResourceTrackingService; import org.opensearch.tasks.consumer.TopNSearchTasksLogger; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; import org.opensearch.action.admin.indices.close.TransportCloseIndexAction; @@ -675,6 +676,8 @@ public void apply(Settings value, Settings current, Settings previous) { IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING ), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), - List.of(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + List.of(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING), + List.of(FeatureFlags.TELEMETRY), + List.of(TelemetrySettings.TRACER_ENABLED_SETTING) ); } diff --git a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java index 19a5808579d50..dae66c79c63ec 100644 --- a/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/FeatureFlagSettings.java @@ -39,7 +39,8 @@ protected FeatureFlagSettings( FeatureFlags.EXTENSIONS_SETTING, FeatureFlags.IDENTITY_SETTING, FeatureFlags.SEARCH_PIPELINE_SETTING, - FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING + FeatureFlags.CONCURRENT_SEGMENT_SEARCH_SETTING, + FeatureFlags.TELEMETRY_SETTING ) ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index a26b4006bb31e..2b4dab616d00f 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -63,6 +63,11 @@ public class FeatureFlags { */ public static final String CONCURRENT_SEGMENT_SEARCH = "opensearch.experimental.feature.concurrent_segment_search.enabled"; + /** + * Gates the functionality of telemetry framework. + */ + public static final String TELEMETRY = "opensearch.experimental.feature.telemetry.enabled"; + /** * Should store the settings from opensearch.yml. 
*/ @@ -105,6 +110,8 @@ public static boolean isEnabled(String featureFlagName) { public static final Setting IDENTITY_SETTING = Setting.boolSetting(IDENTITY, false, Property.NodeScope); + public static final Setting TELEMETRY_SETTING = Setting.boolSetting(TELEMETRY, false, Property.NodeScope); + public static final Setting CONCURRENT_SEGMENT_SEARCH_SETTING = Setting.boolSetting( CONCURRENT_SEGMENT_SEARCH, false, diff --git a/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java new file mode 100644 index 0000000000000..5789b47423c1d --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/ReorganizingLongHash.java @@ -0,0 +1,307 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util; + +import org.apache.lucene.util.hppc.BitMixer; +import org.opensearch.common.lease.Releasable; + +/** + * Specialized hash table implementation that maps a (primitive) long to long. + * + *
<p>
+ * It organizes itself by moving keys around dynamically in order to reduce the + * longest probe sequence length (PSL), which makes lookups faster as keys are likely to + * be found in the same CPU cache line. It also optimizes lookups for recently added keys, + * making it useful for aggregations where keys are correlated across consecutive hits. + * + *
<p>
+ * This class is not thread-safe. + * + * @opensearch.internal + */ +public class ReorganizingLongHash implements Releasable { + private static final long MAX_CAPACITY = 1L << 32; + private static final long DEFAULT_INITIAL_CAPACITY = 32; + private static final float DEFAULT_LOAD_FACTOR = 0.6f; + + /** + * Maximum load factor after which the capacity is doubled. + */ + private final float loadFactor; + + /** + * Utility class to allocate recyclable arrays. + */ + private final BigArrays bigArrays; + + /** + * Current capacity of the hash table. This must be a power of two so that the hash table slot + * can be identified quickly using bitmasks, thus avoiding expensive modulo or integer division. + */ + private long capacity; + + /** + * Bitmask to identify the hash table slot from a key's hash. + */ + private long mask; + + /** + * Size threshold after which the hash table needs to be doubled in capacity. + */ + private long grow; + + /** + * Current size of the hash table. + */ + private long size; + + /** + * Underlying array to store the hash table values. + * + *
<p>
+ * Each hash table value (64-bit) uses the following byte packing strategy: + *
* <pre>
+     * |=========|===============|================|================================|
+     * | Discard | PSL           | Fingerprint    | Ordinal                        |
+     * |    -    |---------------|----------------|--------------------------------|
+     * | 1 bit   | 15 bits       | 16 bits        | 32 bits                        |
+     * |=========|===============|================|================================|
+     * </pre>
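+     * For example (illustrative): a table value of 0x0003_4A2B_0000_0011L would encode
+     * PSL = 3, fingerprint = 0x4A2B, and ordinal = 17 under this packing.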
+ * + *
<p>
+ * This allows us to encode and manipulate additional information in the hash table + * itself without having to look elsewhere in the memory, which is much slower. + * + *
<p>
+ * Terminology: table[index] = value = (discard | psl | fingerprint | ordinal) + */ + private LongArray table; + + /** + * Underlying array to store the keys. + * + *
<p>
+ * Terminology: keys[ordinal] = key + */ + private LongArray keys; + + /** + * Bitmasks to manipulate the hash table values. + */ + private static final long MASK_ORDINAL = 0x00000000FFFFFFFFL; // extract ordinal + private static final long MASK_FINGERPRINT = 0x0000FFFF00000000L; // extract fingerprint + private static final long MASK_PSL = 0x7FFF000000000000L; // extract PSL + private static final long INCR_PSL = 0x0001000000000000L; // increment PSL by one + + public ReorganizingLongHash(final BigArrays bigArrays) { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, bigArrays); + } + + public ReorganizingLongHash(final long initialCapacity, final float loadFactor, final BigArrays bigArrays) { + assert initialCapacity > 0 : "initial capacity must be greater than 0"; + assert loadFactor > 0 && loadFactor < 1 : "load factor must be between 0 and 1"; + + this.bigArrays = bigArrays; + this.loadFactor = loadFactor; + + capacity = nextPowerOfTwo((long) (initialCapacity / loadFactor)); + mask = capacity - 1; + grow = (long) (capacity * loadFactor); + size = 0; + + table = bigArrays.newLongArray(capacity, false); + table.fill(0, capacity, -1); // -1 represents an empty slot + keys = bigArrays.newLongArray(initialCapacity, false); + } + + /** + * Adds the given key to the hash table and returns its ordinal. + * If the key exists already, it returns (-1 - ordinal). + */ + public long add(final long key) { + final long ordinal = find(key); + if (ordinal != -1) { + return -1 - ordinal; + } + + if (size >= grow) { + grow(); + } + + return insert(key); + } + + /** + * Returns the key associated with the given ordinal. + * The result is undefined for an unused ordinal. + */ + public long get(final long ordinal) { + return keys.get(ordinal); + } + + /** + * Returns the ordinal associated with the given key, or -1 if the key doesn't exist. + * + *
<p>
+ * Using the 64-bit hash value, up to 32 least significant bits (LSB) are used to identify the + * home slot in the hash table, and an additional 16 bits are used to identify the fingerprint. + * The fingerprint further increases the entropy and reduces the number of false lookups in the + * keys' table during equality checks, which is expensive due to an uncorrelated memory lookup. + * + *
<p>
+ * Total entropy bits = 16 + log2(capacity) + * + *
<p>
+ * Linear probing starts from the home slot, until a match or an empty slot is found. + * Values are first checked using their fingerprint (to reduce false positives), then verified + * in the keys' table using an equality check. + */ + public long find(final long key) { + final long hash = hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + for (long idx = hash & mask, value, ordinal;; idx = (idx + 1) & mask) { + if ((value = table.get(idx)) == -1) { + return -1; + } else if (((value & MASK_FINGERPRINT) == fingerprint) && (keys.get((ordinal = (value & MASK_ORDINAL))) == key)) { + return ordinal; + } + } + } + + /** + * Returns the number of mappings in this hash table. + */ + public long size() { + return size; + } + + /** + * Inserts the given key in the hash table and returns its ordinal. + * + *
<p>
+ * Inspired by Robin Hood Hashing (RHH): if the PSL for the existing value is less than the PSL + * for the value being inserted, swap the two values and keep going. Values that were inserted + * early and thus "lucked out" on their PSLs will gradually be moved away from their preferred + * slot as new values come in that could make better use of that place in the table. It evens out + * the PSLs across the board and reduces the longest PSL dramatically. + * + *
<p>
+ * A lower variance is better because, with modern CPU architectures, a PSL of 1 isn't much + * faster than a PSL of 3, because the main cost is fetching the cache line. The ideal hash + * table layout is the one where all values have equal PSLs, and that PSL fits within one cache line. + * + *
<p>
+ * The expected longest PSL for a full table: log(N) + * + *
<p>
+ * Our implementation has a slight variation on top of it: by loosening the guarantees provided + * by RHH, we can improve the performance on correlated lookups (such as aggregating on repeated + * timestamps) by moving the "recent" keys closer to their home slot, and eventually converging + * to the ideal hash table layout defined by RHH. + */ + private long insert(final long key) { + final long hash = hash(key); + final long fingerprint = hash & MASK_FINGERPRINT; + + // The ideal home slot for the given key. + long idx = hash & mask; + + // The value yet to find an empty slot (candidate). + long value = fingerprint | size; + + // The existing value at idx. + long existingValue; + + // Always set the newly inserted key at its ideal home slot, even if it doesn't conform + // to the RHH scheme (yet). This will ensure subsequent correlated lookups are fast due + // to no additional probing. When another insertion causes this value to be displaced, it + // will eventually be placed at an appropriate location defined by the RHH scheme. + if ((value = table.set(idx, value)) == -1) { + // The ideal home slot was already empty; append the key and return early. + return append(key); + } + + // Find an alternative slot for the displaced value such that the longest PSL is minimized. + do { + idx = (idx + 1) & mask; + value += INCR_PSL; + + if ((existingValue = table.get(idx)) == -1) { + // Empty slot; insert the candidate value here. + table.set(idx, value); + return append(key); + } else if ((existingValue & MASK_PSL) <= (value & MASK_PSL)) { + // Existing value is "richer" than the candidate value at this index; + // swap and find an alternative slot for the displaced value. + // In the case of a tie, the candidate value (i.e. the recent value) is chosen as + // the winner and kept closer to its ideal home slot in order to speed up + // correlated lookups. + value = table.set(idx, value); + } + } while (true); + } + + /** + * Appends the key in the keys' table. + */ + private long append(final long key) { + keys = bigArrays.grow(keys, size + 1); + keys.set(size, key); + return size++; + } + + /** + * Returns the hash for the given key. + * Visible for unit-tests. + */ + long hash(final long key) { + return BitMixer.mix64(key); + } + + /** + * Returns the underlying hash table. + * Visible for unit-tests. + */ + LongArray getTable() { + return table; + } + + /** + * Grows the hash table by doubling its capacity and reinserting the keys. + */ + private void grow() { + // Ensure that the hash table doesn't grow too large. + // This implicitly also ensures that the ordinals are no larger than 2^32, thus, + // preventing them from polluting other bits (PSL/fingerprint) in the hash table values. 
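+ // For example (illustrative): with MAX_CAPACITY = 2^32, every ordinal fits in the low
+ // 32 bits of a table value, so growth can never spill ordinal bits into the fingerprint field.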
+ assert capacity < MAX_CAPACITY : "hash table already at the max capacity"; + + final long oldSize = size; + capacity <<= 1; + mask = capacity - 1; + size = 0; + grow = (long) (capacity * loadFactor); + table = bigArrays.resize(table, capacity); + table.fill(0, capacity, -1); + + for (long ordinal = 0; ordinal < oldSize; ordinal++) { + insert(keys.get(ordinal)); + } + } + + @Override + public void close() { + table.close(); + keys.close(); + } + + private static long nextPowerOfTwo(final long value) { + return Math.max(1, Long.highestOneBit(value - 1) << 1); + } +} diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 5b9a77c75dddb..025fb7a36b684 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -45,6 +45,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.http.HttpTransportSettings; import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskThreadContextStatePropagator; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -56,7 +57,9 @@ import java.util.LinkedHashSet; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.BiConsumer; import java.util.function.BinaryOperator; import java.util.function.Function; @@ -66,7 +69,6 @@ import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; -import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -113,6 +115,7 @@ public final class ThreadContext implements Writeable { private final ThreadLocal threadLocal; private final int maxWarningHeaderCount; private final long maxWarningHeaderSize; + private final List propagators; /** * Creates a new ThreadContext instance @@ -123,6 +126,15 @@ public ThreadContext(Settings settings) { this.threadLocal = ThreadLocal.withInitial(() -> DEFAULT_CONTEXT); this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings); this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes(); + this.propagators = new CopyOnWriteArrayList<>(List.of(new TaskThreadContextStatePropagator())); + } + + public void registerThreadContextStatePropagator(final ThreadContextStatePropagator propagator) { + propagators.add(Objects.requireNonNull(propagator)); + } + + public void unregisterThreadContextStatePropagator(final ThreadContextStatePropagator propagator) { + propagators.remove(Objects.requireNonNull(propagator)); } /** @@ -147,8 +159,9 @@ public StoredContext stashContext() { ); } - if (context.transientHeaders.containsKey(TASK_ID)) { - threadContextStruct = threadContextStruct.putTransient(TASK_ID, context.transientHeaders.get(TASK_ID)); + final Map transientHeaders = propagateTransients(context.transientHeaders); + if (!transientHeaders.isEmpty()) { + threadContextStruct = threadContextStruct.putTransient(transientHeaders); } threadLocal.set(threadContextStruct); @@ -166,7 +179,10 @@ public StoredContext stashContext() { */ public Writeable captureAsWriteable() { final ThreadContextStruct context = 
threadLocal.get(); - return out -> context.writeTo(out, defaultHeader); + return out -> { + final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + context.writeTo(out, defaultHeader, propagatedHeaders); + }; } /** @@ -224,17 +240,24 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders) { */ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collection transientHeadersToClear) { final ThreadContextStruct originalContext = threadLocal.get(); + final Map newTransientHeaders = new HashMap<>(originalContext.transientHeaders); + + boolean transientHeadersModified = false; + final Map transientHeaders = propagateTransients(originalContext.transientHeaders); + if (!transientHeaders.isEmpty()) { + newTransientHeaders.putAll(transientHeaders); + transientHeadersModified = true; + } + // clear specific transient headers from the current context - Map newTransientHeaders = null; for (String transientHeaderToClear : transientHeadersToClear) { - if (originalContext.transientHeaders.containsKey(transientHeaderToClear)) { - if (newTransientHeaders == null) { - newTransientHeaders = new HashMap<>(originalContext.transientHeaders); - } + if (newTransientHeaders.containsKey(transientHeaderToClear)) { newTransientHeaders.remove(transientHeaderToClear); + transientHeadersModified = true; } } - if (newTransientHeaders != null) { + + if (transientHeadersModified == true) { ThreadContextStruct threadContextStruct = new ThreadContextStruct( originalContext.requestHeaders, originalContext.responseHeaders, @@ -246,6 +269,7 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collectio } // this is the context when this method returns final ThreadContextStruct newContext = threadLocal.get(); + return () -> { if (preserveResponseHeaders && threadLocal.get() != newContext) { threadLocal.set(originalContext.putResponseHeaders(threadLocal.get().responseHeaders)); @@ -294,7 +318,9 @@ public Supplier wrapRestorable(StoredContext storedContext) { @Override public void writeTo(StreamOutput out) throws IOException { - threadLocal.get().writeTo(out, defaultHeader); + final ThreadContextStruct context = threadLocal.get(); + final Map propagatedHeaders = propagateHeaders(context.transientHeaders); + context.writeTo(out, defaultHeader, propagatedHeaders); } /** @@ -522,6 +548,18 @@ public static Map buildDefaultHeaders(Settings settings) { } } + private Map propagateTransients(Map source) { + final Map transients = new HashMap<>(); + propagators.forEach(p -> transients.putAll(p.transients(source))); + return transients; + } + + private Map propagateHeaders(Map source) { + final Map headers = new HashMap<>(); + propagators.forEach(p -> headers.putAll(p.headers(source))); + return headers; + } + private static final class ThreadContextStruct { private static final ThreadContextStruct EMPTY = new ThreadContextStruct( @@ -695,6 +733,14 @@ private ThreadContextStruct putResponse( return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); } + private ThreadContextStruct putTransient(Map values) { + Map newTransient = new HashMap<>(this.transientHeaders); + for (Map.Entry entry : values.entrySet()) { + putSingleHeader(entry.getKey(), entry.getValue(), newTransient); + } + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext); + } + private ThreadContextStruct putTransient(String key, Object value) { Map newTransient = new 
HashMap<>(this.transientHeaders); putSingleHeader(key, value, newTransient); @@ -709,13 +755,15 @@ private ThreadContextStruct copyHeaders(Iterable> head return putHeaders(newHeaders); } - private void writeTo(StreamOutput out, Map defaultHeaders) throws IOException { + private void writeTo(StreamOutput out, Map defaultHeaders, Map propagatedHeaders) + throws IOException { final Map requestHeaders; - if (defaultHeaders.isEmpty()) { + if (defaultHeaders.isEmpty() && propagatedHeaders.isEmpty()) { requestHeaders = this.requestHeaders; } else { requestHeaders = new HashMap<>(defaultHeaders); requestHeaders.putAll(this.requestHeaders); + requestHeaders.putAll(propagatedHeaders); } out.writeVInt(requestHeaders.size()); diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java new file mode 100644 index 0000000000000..b3fc79c5446db --- /dev/null +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContextStatePropagator.java @@ -0,0 +1,31 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.util.concurrent; + +import java.util.Map; + +/** + * The propagator for {@link ThreadContext} that helps to carry-over the state from one + * thread to another (tasks, tracing context, etc). + */ +public interface ThreadContextStatePropagator { + /** + * Returns the list of transient headers that needs to be propagated from current context to new thread context. + * @param source current context transient headers + * @return the list of transient headers that needs to be propagated from current context to new thread context + */ + Map transients(Map source); + + /** + * Returns the list of request headers that needs to be propagated from current context to request. + * @param source current context headers + * @return the list of request headers that needs to be propagated from current context to request + */ + Map headers(Map source); +} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 9938d11caca13..d89d51c713d70 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -44,6 +44,7 @@ import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.Term; +import org.apache.lucene.index.IndexFileNames; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.ReferenceManager; @@ -1495,7 +1496,7 @@ public GatedCloseable acquireLastIndexCommitAndRefresh(boolean flus * @throws IOException if there is some failure in acquiring lock in remote store. 
*/ public void acquireLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException { - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteSegmentDirectoryForShard(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory(); remoteSegmentStoreDirectory.acquireLock(primaryTerm, generation, snapshotId); } @@ -1507,20 +1508,10 @@ public void acquireLockOnCommitData(String snapshotId, long primaryTerm, long ge * @throws IOException if there is some failure in releasing lock in remote store. */ public void releaseLockOnCommitData(String snapshotId, long primaryTerm, long generation) throws IOException { - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteSegmentDirectoryForShard(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = getRemoteDirectory(); remoteSegmentStoreDirectory.releaseLock(primaryTerm, generation, snapshotId); } - private RemoteSegmentStoreDirectory getRemoteSegmentDirectoryForShard() { - FilterDirectory remoteStoreDirectory = (FilterDirectory) remoteStore.directory(); - assert remoteStoreDirectory.getDelegate() instanceof FilterDirectory - : "Store.directory is not enclosing an instance of FilterDirectory"; - FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); - final Directory remoteDirectory = byteSizeCachingStoreDirectory.getDelegate(); - assert remoteDirectory instanceof RemoteSegmentStoreDirectory : "remoteDirectory is not an instance of RemoteSegmentStoreDirectory"; - return ((RemoteSegmentStoreDirectory) remoteDirectory); - } - public Optional getReplicationEngine() { if (getEngine() instanceof NRTReplicationEngine) { return Optional.of((NRTReplicationEngine) getEngine()); @@ -2290,7 +2281,24 @@ public void openEngineAndSkipTranslogRecovery() throws IOException { getEngine().translogManager().skipTranslogRecovery(); } + public void openEngineAndSkipTranslogRecoveryFromSnapshot() throws IOException { + assert routingEntry().recoverySource().getType() == RecoverySource.Type.SNAPSHOT : "not a snapshot recovery [" + + routingEntry() + + "]"; + recoveryState.validateCurrentStage(RecoveryState.Stage.INDEX); + maybeCheckIndex(); + recoveryState.setStage(RecoveryState.Stage.TRANSLOG); + recoveryState.validateCurrentStage(RecoveryState.Stage.TRANSLOG); + loadGlobalCheckpointToReplicationTracker(); + innerOpenEngineAndTranslog(replicationTracker, false); + getEngine().translogManager().skipTranslogRecovery(); + } + private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) throws IOException { + innerOpenEngineAndTranslog(globalCheckpointSupplier, true); + } + + private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, boolean syncFromRemote) throws IOException { assert Thread.holdsLock(mutex) == false : "opening engine under mutex"; if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); @@ -2309,11 +2317,20 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier) t synchronized (engineMutex) { assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); - if (indexSettings.isRemoteStoreEnabled()) { + if (indexSettings.isRemoteStoreEnabled() && syncFromRemote) { syncSegmentsFromRemoteSegmentStore(false, true, true); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { - syncRemoteTranslogAndUpdateGlobalCheckpoint(); + if (syncFromRemote) { + 
syncRemoteTranslogAndUpdateGlobalCheckpoint();
+                } else {
+                    // We enter this block when we do not want to recover from the remote translog;
+                    // currently that happens only during snapshot restore. Remote translog
+                    // initialization cannot skip downloading translog files, so before that step we
+                    // delete the translog files present in the remote store.
+                    deleteTranslogFilesFromRemoteTranslog();
+                }
             }
             // we must create a new engine under mutex (see IndexShard#snapshotStoreMetadata).
             final Engine newEngine = engineFactory.newReadWriteEngine(config);
@@ -2605,6 +2622,22 @@ public void restoreFromRemoteStore(ActionListener listener) {
         storeRecovery.recoverFromRemoteStore(this, listener);
     }

+    public void restoreFromSnapshotAndRemoteStore(
+        Repository repository,
+        RepositoriesService repositoriesService,
+        ActionListener listener
+    ) {
+        try {
+            assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
+            assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: "
+                + recoveryState.getRecoverySource();
+            StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
+            storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener);
+        } catch (Exception e) {
+            listener.onFailure(e);
+        }
+    }
+
     public void restoreFromRepository(Repository repository, ActionListener listener) {
         try {
             assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
@@ -3418,6 +3451,15 @@ public void startRecovery(
             final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource();
             if (recoverySource.isSearchableSnapshot()) {
                 executeRecovery("from snapshot (remote)", recoveryState, recoveryListener, this::recoverFromStore);
+            } else if (recoverySource.remoteStoreIndexShallowCopy()) {
+                final String repo = recoverySource.snapshot().getRepository();
+                executeRecovery(
+                    "from snapshot and remote store",
+                    recoveryState,
+                    recoveryListener,
+                    l -> restoreFromSnapshotAndRemoteStore(repositoriesService.repository(repo), repositoriesService, l)
+                );
+                // indicesService.indexService(shardRouting.shardId().getIndex()).addMetadataListener();
             } else {
                 final String repo = recoverySource.snapshot().getRepository();
                 executeRecovery(
@@ -4536,6 +4578,13 @@ private void syncRemoteTranslogAndUpdateGlobalCheckpoint() throws IOException {
         loadGlobalCheckpointToReplicationTracker();
     }

+    public void deleteTranslogFilesFromRemoteTranslog() throws IOException {
+        TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting);
+        assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory;
+        Repository repository = ((RemoteBlobStoreInternalTranslogFactory) translogFactory).getRepository();
+        RemoteFsTranslog.cleanup(repository, shardId, getThreadPool());
+    }
+
     public void syncTranslogFilesFromRemoteTranslog() throws IOException {
         TranslogFactory translogFactory = translogFactorySupplier.apply(indexSettings, shardRouting);
         assert translogFactory instanceof RemoteBlobStoreInternalTranslogFactory;
@@ -4558,12 +4607,11 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re
         // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that
         // are uploaded to the remote segment store.
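Before continuing with the segment-sync changes, it is worth summarizing the restore flow introduced above. This is an editorial, comment-only outline of the changes in this file, not code from the change itself:

```java
// 1. startRecovery(): a SNAPSHOT recovery source whose remoteStoreIndexShallowCopy()
//    flag is set routes to restoreFromSnapshotAndRemoteStore(...) instead of the
//    plain snapshot restore path.
// 2. restoreFromSnapshotAndRemoteStore(...) delegates to
//    StoreRecovery#recoverFromSnapshotAndRemoteStore, which resolves the shallow-copy
//    shard metadata and downloads that commit's segments from the remote store.
// 3. The engine is then opened with syncFromRemote == false: rather than syncing the
//    remote translog, deleteTranslogFilesFromRemoteTranslog() clears stale remote
//    translog files so the subsequent remote translog initialization starts clean.
```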
RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.init(); - Map uploadedSegments = ((RemoteSegmentStoreDirectory) remoteDirectory) + + Map uploadedSegments = remoteDirectory .getSegmentsUploadedToRemoteStore(); store.incRef(); remoteStore.incRef(); - List downloadedSegments = new ArrayList<>(); - List skippedSegments = new ArrayList<>(); try { final Directory storeDirectory; if (recoveryState.getStage() == RecoveryState.Stage.INDEX) { @@ -4580,18 +4628,7 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re storeDirectory = store.directory(); } Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); - for (String file : uploadedSegments.keySet()) { - long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); - if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { - if (localSegmentFiles.contains(file)) { - storeDirectory.deleteFile(file); - } - storeDirectory.copyFrom(remoteDirectory, file, file, IOContext.DEFAULT); - downloadedSegments.add(file); - } else { - skippedSegments.add(file); - } - } + copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); if (refreshLevelSegmentSync && remoteSegmentMetadata != null) { try ( @@ -4637,13 +4674,113 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re } catch (IOException e) { throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); } finally { - logger.info("Downloaded segments: {}", downloadedSegments); - logger.info("Skipped download for segments: {}", skippedSegments); store.decRef(); remoteStore.decRef(); } } + /** + * Downloads segments from given remote segment store for a specific commit. 
+ * @param overrideLocal flag to override local segment files with those in remote store + * @param sourceRemoteDirectory RemoteSegmentDirectory Instance from which we need to sync segments + * @param primaryTerm Primary Term for shard at the time of commit operation for which we are syncing segments + * @param commitGeneration commit generation at the time of commit operation for which we are syncing segments + * @throws IOException if exception occurs while reading segments from remote store + */ + public void syncSegmentsFromGivenRemoteSegmentStore( + boolean overrideLocal, + RemoteSegmentStoreDirectory sourceRemoteDirectory, + long primaryTerm, + long commitGeneration + ) throws IOException { + logger.info("Downloading segments from given remote segment store"); + RemoteSegmentStoreDirectory remoteDirectory = null; + if (remoteStore != null) { + remoteDirectory = getRemoteDirectory(); + remoteDirectory.init(); + remoteStore.incRef(); + } + Map uploadedSegments = sourceRemoteDirectory + .initializeToSpecificCommit(primaryTerm, commitGeneration) + .getMetadata(); + final Directory storeDirectory = store.directory(); + store.incRef(); + + try { + String segmentsNFile = copySegmentFiles( + storeDirectory, + sourceRemoteDirectory, + remoteDirectory, + uploadedSegments, + overrideLocal + ); + if (segmentsNFile != null) { + try ( + ChecksumIndexInput indexInput = new BufferedChecksumIndexInput( + storeDirectory.openInput(segmentsNFile, IOContext.DEFAULT) + ) + ) { + SegmentInfos infosSnapshot = SegmentInfos.readCommit(store.directory(), indexInput, commitGeneration); + long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); + if (remoteStore != null) { + store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); + } else { + store.directory().sync(infosSnapshot.files(true)); + store.directory().syncMetaData(); + } + } + } + } catch (IOException e) { + throw new IndexShardRecoveryException(shardId, "Exception while copying segment files from remote segment store", e); + } finally { + store.decRef(); + if (remoteStore != null) { + remoteStore.decRef(); + } + } + } + + private String copySegmentFiles( + Directory storeDirectory, + RemoteSegmentStoreDirectory sourceRemoteDirectory, + RemoteSegmentStoreDirectory targetRemoteDirectory, + Map uploadedSegments, + boolean overrideLocal + ) throws IOException { + List downloadedSegments = new ArrayList<>(); + List skippedSegments = new ArrayList<>(); + String segmentNFile = null; + try { + Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); + if (overrideLocal) { + for (String file : localSegmentFiles) { + storeDirectory.deleteFile(file); + } + } + for (String file : uploadedSegments.keySet()) { + long checksum = Long.parseLong(uploadedSegments.get(file).getChecksum()); + if (overrideLocal || localDirectoryContains(storeDirectory, file, checksum) == false) { + storeDirectory.copyFrom(sourceRemoteDirectory, file, file, IOContext.DEFAULT); + storeDirectory.sync(Collections.singleton(file)); + downloadedSegments.add(file); + } else { + skippedSegments.add(file); + } + if (targetRemoteDirectory != null) { + targetRemoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + } + if (file.startsWith(IndexFileNames.SEGMENTS)) { + assert segmentNFile == null : "There should be only one SegmentInfosSnapshot file"; + segmentNFile = file; + } + } + } finally { + logger.info("Downloaded segments here: {}", downloadedSegments); + logger.info("Skipped 
download for segments here: {}", skippedSegments);
+        }
+        return segmentNFile;
+    }
+
     private boolean localDirectoryContains(Directory localDirectory, String file, long checksum) {
         try (IndexInput indexInput = localDirectory.openInput(file, IOContext.DEFAULT)) {
             if (checksum == CodecUtil.retrieveChecksum(indexInput)) {
diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
index d7f7373e83bd0..7cfaaafcadd39 100644
--- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
@@ -29,6 +29,7 @@
 import org.opensearch.index.remote.RemoteRefreshSegmentTracker;
 import org.opensearch.index.seqno.SequenceNumbers;
 import org.opensearch.index.store.RemoteSegmentStoreDirectory;
+import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata;
 import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
 import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.threadpool.Scheduler;
@@ -71,6 +72,8 @@ public final class RemoteStoreRefreshListener implements ReferenceManager.Refres
      */
     private static final int REMOTE_REFRESH_RETRY_MAX_INTERVAL_MILLIS = 10_000;

+    private static final int INVALID_PRIMARY_TERM = -1;
+
     /**
      * Exponential back off policy with max retry interval.
      */
@@ -118,15 +121,18 @@ public RemoteStoreRefreshListener(
         this.storeDirectory = indexShard.store().directory();
         this.remoteDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory())
             .getDelegate()).getDelegate();
-        this.primaryTerm = indexShard.getOperationPrimaryTerm();
         localSegmentChecksumMap = new HashMap<>();
+        RemoteSegmentMetadata remoteSegmentMetadata = null;
         if (indexShard.routingEntry().primary()) {
             try {
-                this.remoteDirectory.init();
+                remoteSegmentMetadata = this.remoteDirectory.init();
             } catch (IOException e) {
                 logger.error("Exception while initialising RemoteSegmentStoreDirectory", e);
             }
         }
+        // Initialize the primary term from the primary term of the latest metadata in the remote store;
+        // if no metadata is present, fall back to INVALID_PRIMARY_TERM (-1).
+        this.primaryTerm = remoteSegmentMetadata != null ?
remoteSegmentMetadata.getPrimaryTerm() : INVALID_PRIMARY_TERM; this.segmentTracker = segmentTracker; resetBackOffDelayIterator(); this.checkpointPublisher = checkpointPublisher; @@ -163,8 +169,9 @@ public void beforeRefresh() throws IOException {} */ @Override public void afterRefresh(boolean didRefresh) { - - if (didRefresh || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) { + if (this.primaryTerm != indexShard.getOperationPrimaryTerm() + || didRefresh + || remoteDirectory.getSegmentsUploadedToRemoteStore().isEmpty()) { updateLocalRefreshTimeAndSeqNo(); try { indexShard.getThreadPool().executor(ThreadPool.Names.REMOTE_REFRESH).submit(() -> syncSegments(false)).get(); diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index a81dc96ff1145..119524e8caf8a 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -59,14 +59,19 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.snapshots.IndexShardRestoreFailedException; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; +import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import java.io.IOException; +import java.nio.channels.FileChannel; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -347,6 +352,72 @@ void recoverFromRepository(final IndexShard indexShard, Repository repository, A } } + void recoverFromSnapshotAndRemoteStore( + final IndexShard indexShard, + Repository repository, + RepositoriesService repositoriesService, + ActionListener listener + ) { + try { + if (canRecover(indexShard)) { + indexShard.preRecovery(); + RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); + assert recoveryType == RecoverySource.Type.SNAPSHOT : "expected snapshot recovery type: " + recoveryType; + SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource(); + final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); + translogState.totalOperations(0); + translogState.totalOperationsOnStart(0); + indexShard.prepareForIndexRecovery(); + + RemoteStoreShardShallowCopySnapshot shallowCopyShardMetadata = repository.getRemoteStoreShallowCopyShardMetadata( + recoverySource.snapshot().getSnapshotId(), + recoverySource.index(), + shardId + ); + + long primaryTerm = shallowCopyShardMetadata.getPrimaryTerm(); + long commitGeneration = shallowCopyShardMetadata.getCommitGeneration(); + String indexUUID = shallowCopyShardMetadata.getIndexUUID(); + String remoteStoreRepository = ((SnapshotRecoverySource) indexShard.recoveryState().getRecoverySource()) + .sourceRemoteStoreRepository(); + if (remoteStoreRepository == null) { + remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository(); + } + + RemoteSegmentStoreDirectoryFactory directoryFactory = new 
RemoteSegmentStoreDirectoryFactory(() -> repositoriesService); + RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( + remoteStoreRepository, + indexUUID, + String.valueOf(shardId.id()) + ); + indexShard.syncSegmentsFromGivenRemoteSegmentStore(true, sourceRemoteDirectory, primaryTerm, commitGeneration); + final Store store = indexShard.store(); + if (indexShard.indexSettings.isRemoteTranslogStoreEnabled() == false) { + bootstrap(indexShard, store); + } else { + bootstrapForSnapshot(indexShard, store); + } + assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; + writeEmptyRetentionLeasesFile(indexShard); + indexShard.recoveryState().getIndex().setFileDetailsComplete(); + if (indexShard.indexSettings.isRemoteStoreEnabled()) { + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } else { + indexShard.openEngineAndRecoverFromTranslog(); + } + indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); + indexShard.finalizeRecovery(); + indexShard.postRecovery("restore done"); + + listener.onResponse(true); + } else { + listener.onResponse(false); + } + } catch (Exception e) { + listener.onFailure(e); + } + } + private boolean canRecover(IndexShard indexShard) { if (indexShard.state() == IndexShardState.CLOSED) { // got closed on us, just ignore this recovery @@ -597,10 +668,18 @@ private void restore( } final ActionListener restoreListener = ActionListener.wrap(v -> { final Store store = indexShard.store(); - bootstrap(indexShard, store); + if (indexShard.indexSettings.isRemoteTranslogStoreEnabled() == false) { + bootstrap(indexShard, store); + } else { + bootstrapForSnapshot(indexShard, store); + } assert indexShard.shardRouting.primary() : "only primary shards can recover from store"; writeEmptyRetentionLeasesFile(indexShard); - indexShard.openEngineAndRecoverFromTranslog(); + if (indexShard.indexSettings.isRemoteStoreEnabled()) { + indexShard.openEngineAndSkipTranslogRecoveryFromSnapshot(); + } else { + indexShard.openEngineAndRecoverFromTranslog(); + } indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm()); indexShard.finalizeRecovery(); indexShard.postRecovery("restore done"); @@ -644,6 +723,21 @@ private void restore( } } + private void bootstrapForSnapshot(final IndexShard indexShard, final Store store) throws IOException { + store.bootstrapNewHistory(); + final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); + final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)); + String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), + shardId, + localCheckpoint, + indexShard.getPendingPrimaryTerm(), + translogUUID, + FileChannel::open + ); + } + private void bootstrap(final IndexShard indexShard, final Store store) throws IOException { store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 15c6fbea99148..addd8a24af9c5 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -126,6 +126,24 @@ public 
RemoteSegmentMetadata init() throws IOException {
         return remoteSegmentMetadata;
     }

+    /**
+     * Initializes the cache, which keeps track of all the segment files uploaded to the
+     * remote segment store, to a specific commit.
+     * This is currently used when restoring snapshots, where we want to copy segment files from a given commit.
+     * TODO: check if we can return a read-only RemoteSegmentStoreDirectory object from here.
+     * @param primaryTerm primary term of the commit we want to read metadata for
+     * @param commitGeneration generation of the commit we want to read metadata for
+     * @return the metadata of the given commit, or null if no metadata file was found
+     * @throws IOException if there were any failures in reading the metadata file
+     */
+    public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long commitGeneration) throws IOException {
+        String metadataFile = getMetadataFileForCommit(primaryTerm, commitGeneration);
+        RemoteSegmentMetadata remoteSegmentMetadata = readMetadataFile(metadataFile);
+        if (remoteSegmentMetadata != null) {
+            this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata());
+        } else {
+            this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>();
+        }
+        return remoteSegmentMetadata;
+    }
+
     /**
      * Read the latest metadata file to get the list of segments uploaded to the remote segment store.
      * We upload a metadata file per refresh, but it is not unique per refresh. Refresh metadata file is unique for a given commit.
@@ -485,6 +503,7 @@ public void uploadMetadata(
                     new RemoteSegmentMetadata(
                         RemoteSegmentMetadata.fromMapOfStrings(uploadedSegments),
                         segmentInfoSnapshotByteArray,
+                        primaryTerm,
                         segmentInfosSnapshot.getGeneration()
                     )
                 );
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
index 388f80ea3e480..03995d5913fb3 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
@@ -43,6 +43,11 @@ public Directory newDirectory(IndexSettings indexSettings, ShardPath path) throw
         String repositoryName = indexSettings.getRemoteStoreRepository();
         String indexUUID = indexSettings.getIndex().getUUID();
         String shardId = String.valueOf(path.getShardId().getId());
+
+        return newDirectory(repositoryName, indexUUID, shardId);
+    }
+
+    public Directory newDirectory(String repositoryName, String indexUUID, String shardId) throws IOException {
         try (Repository repository = repositoriesService.get().repository(repositoryName)) {
             assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository";
             BlobPath commonBlobPath = ((BlobStoreRepository) repository).basePath();
diff --git a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java
index 5ed1f5e0ee21b..f23e057196096 100644
--- a/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java
+++ b/server/src/main/java/org/opensearch/index/store/remote/filecache/FileCacheFactory.java
@@ -11,7 +11,6 @@
 import org.opensearch.common.breaker.CircuitBreaker;
 import org.opensearch.common.cache.RemovalReason;
 import org.opensearch.index.store.remote.utils.cache.SegmentedCache;
-import org.opensearch.index.store.remote.file.OnDemandBlockSnapshotIndexInput;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -39,24 +38,11 @@ public class FileCacheFactory {
     public static FileCache createConcurrentLRUFileCache(long capacity, CircuitBreaker circuitBreaker) {
-        return
createFileCache(createDefaultBuilder().capacity(capacity).build(), circuitBreaker); + return new FileCache(createDefaultBuilder().capacity(capacity).build(), circuitBreaker); } public static FileCache createConcurrentLRUFileCache(long capacity, int concurrencyLevel, CircuitBreaker circuitBreaker) { - return createFileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build(), circuitBreaker); - } - - private static FileCache createFileCache(SegmentedCache segmentedCache, CircuitBreaker circuitBreaker) { - /* - * Since OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE is not overridden then it will be upper bound for max IndexInput - * size on disk. A single IndexInput size should always be more than a single segment in segmented cache. A FileCache capacity might - * be defined with large capacity (> IndexInput block size) but due to segmentation and concurrency factor, that capacity is - * distributed equally across segments. - */ - if (segmentedCache.getPerSegmentCapacity() <= OnDemandBlockSnapshotIndexInput.Builder.DEFAULT_BLOCK_SIZE) { - throw new IllegalStateException("FileSystem Cache per segment capacity is less than single IndexInput default block size"); - } - return new FileCache(segmentedCache, circuitBreaker); + return new FileCache(createDefaultBuilder().capacity(capacity).concurrencyLevel(concurrencyLevel).build(), circuitBreaker); } private static SegmentedCache.Builder createDefaultBuilder() { diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 2a84fbfb89c93..9a479346ff711 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -38,16 +38,19 @@ public class RemoteSegmentMetadata { private final byte[] segmentInfosBytes; + private final long primaryTerm; private final long generation; public RemoteSegmentMetadata( Map metadata, byte[] segmentInfosBytes, + long primaryTerm, long generation ) { this.metadata = metadata; this.segmentInfosBytes = segmentInfosBytes; this.generation = generation; + this.primaryTerm = primaryTerm; } /** @@ -66,6 +69,10 @@ public long getGeneration() { return generation; } + public long getPrimaryTerm() { + return primaryTerm; + } + /** * Generate {@code Map} from {@link RemoteSegmentMetadata} * @return {@code Map} @@ -93,6 +100,7 @@ public static Map f public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); out.writeLong(generation); + out.writeLong(primaryTerm); out.writeLong(segmentInfosBytes.length); out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } @@ -100,9 +108,10 @@ public void write(IndexOutput out) throws IOException { public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { Map metadata = indexInput.readMapOfStrings(); long generation = indexInput.readLong(); + long primaryTerm = indexInput.readLong(); int byteArraySize = (int) indexInput.readLong(); byte[] segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); - return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, generation); + return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, primaryTerm, generation); } } diff --git 
a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java index 6ebb1bf7d2252..04057b581e8d9 100644 --- a/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java +++ b/server/src/main/java/org/opensearch/index/translog/RemoteFsTranslog.java @@ -423,6 +423,20 @@ private void deleteStaleRemotePrimaryTermsAndMetadataFiles() { } } + public static void cleanup(Repository repository, ShardId shardId, ThreadPool threadPool) throws IOException { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + FileTransferTracker fileTransferTracker = new FileTransferTracker(shardId); + TranslogTransferManager translogTransferManager = buildTranslogTransferManager( + blobStoreRepository, + threadPool, + shardId, + fileTransferTracker + ); + // clean up all remote translog files + translogTransferManager.deleteTranslogFiles(); + } + protected void onDelete() { if (primaryModeSupplier.getAsBoolean() == false) { logger.trace("skipped delete translog"); diff --git a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java index 58aca00d2e9d3..f6405bc9b5c82 100644 --- a/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java +++ b/server/src/main/java/org/opensearch/index/translog/transfer/TranslogTransferManager.java @@ -361,6 +361,11 @@ public void onFailure(Exception e) { }); } + public void deleteTranslogFiles() throws IOException { + transferService.delete(remoteMetadataTransferPath); + transferService.delete(remoteDataTransferPath); + } + /** * Deletes list of translog files asynchronously using the {@code REMOTE_PURGE} threadpool. 
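The cleanup path above funnels into TranslogTransferManager#deleteTranslogFiles. For readability, here is the same two-line method again with explanatory comments; the metadata-before-data ordering rationale is an inference, not something the change states:

```java
public void deleteTranslogFiles() throws IOException {
    // Delete metadata first: once the metadata path is gone, readers can no longer
    // resolve references to translog data files (inferred rationale).
    transferService.delete(remoteMetadataTransferPath);
    // Then delete the data files themselves.
    transferService.delete(remoteDataTransferPath);
}
```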
* diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 4a0fab82f9adc..e4b251914fa0b 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -107,6 +107,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; import static org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.FAILURE; @@ -544,7 +545,19 @@ private void createIndices(final ClusterState state) { AllocatedIndex indexService = null; try { - indexService = indicesService.createIndex(indexMetadata, builtInIndexListener, true); + List updatedIndexEventListeners = new ArrayList<>(builtInIndexListener); + if (entry.getValue().size() > 0 + && entry.getValue().get(0).recoverySource().getType() == Type.SNAPSHOT + && indexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) { + final IndexEventListener refreshListenerAfterSnapshotRestore = new IndexEventListener() { + @Override + public void afterIndexShardStarted(IndexShard indexShard) { + indexShard.refresh("refresh to upload metadata to remote store"); + } + }; + updatedIndexEventListeners.add(refreshListenerAfterSnapshotRestore); + } + indexService = indicesService.createIndex(indexMetadata, updatedIndexEventListeners, true); if (indexService.updateMapping(null, indexMetadata) && sendRefreshMapping) { nodeMappingRefreshAction.nodeMappingRefresh( state.nodes().getClusterManagerNode(), diff --git a/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java b/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java index 8cdbc487dc137..a5f4870029e87 100644 --- a/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/CompoundProcessor.java @@ -34,6 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.OperationMetrics; import java.util.ArrayList; import java.util.Arrays; @@ -60,7 +61,7 @@ public class CompoundProcessor implements Processor { private final boolean ignoreFailure; private final List processors; private final List onFailureProcessors; - private final List> processorsWithMetrics; + private final List> processorsWithMetrics; private final LongSupplier relativeTimeProvider; CompoundProcessor(LongSupplier relativeTimeProvider, Processor... 
processor) { @@ -87,10 +88,10 @@ public CompoundProcessor(boolean ignoreFailure, List processors, List this.onFailureProcessors = onFailureProcessors; this.relativeTimeProvider = relativeTimeProvider; this.processorsWithMetrics = new ArrayList<>(processors.size()); - processors.forEach(p -> processorsWithMetrics.add(new Tuple<>(p, new IngestMetric()))); + processors.forEach(p -> processorsWithMetrics.add(new Tuple<>(p, new OperationMetrics()))); } - List> getProcessorsWithMetrics() { + List> getProcessorsWithMetrics() { return processorsWithMetrics; } @@ -155,17 +156,17 @@ void innerExecute(int currentProcessor, IngestDocument ingestDocument, BiConsume return; } - Tuple processorWithMetric = processorsWithMetrics.get(currentProcessor); + Tuple processorWithMetric = processorsWithMetrics.get(currentProcessor); final Processor processor = processorWithMetric.v1(); - final IngestMetric metric = processorWithMetric.v2(); + final OperationMetrics metric = processorWithMetric.v2(); final long startTimeInNanos = relativeTimeProvider.getAsLong(); - metric.preIngest(); + metric.before(); processor.execute(ingestDocument, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metric.postIngest(ingestTimeInMillis); + metric.after(ingestTimeInMillis); if (e != null) { - metric.ingestFailed(); + metric.failed(); if (ignoreFailure) { innerExecute(currentProcessor + 1, ingestDocument, handler); } else { diff --git a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java index 591a71fd72b8f..8bf489805f7ca 100644 --- a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java @@ -32,6 +32,7 @@ package org.opensearch.ingest; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.script.IngestConditionalScript; import org.opensearch.script.Script; import org.opensearch.script.ScriptException; @@ -66,7 +67,7 @@ public class ConditionalProcessor extends AbstractProcessor implements WrappingP private final Script condition; private final ScriptService scriptService; private final Processor processor; - private final IngestMetric metric; + private final OperationMetrics metric; private final LongSupplier relativeTimeProvider; private final IngestConditionalScript precompiledConditionScript; @@ -86,7 +87,7 @@ public class ConditionalProcessor extends AbstractProcessor implements WrappingP this.condition = script; this.scriptService = scriptService; this.processor = processor; - this.metric = new IngestMetric(); + this.metric = new OperationMetrics(); this.relativeTimeProvider = relativeTimeProvider; try { @@ -114,12 +115,12 @@ public void execute(IngestDocument ingestDocument, BiConsumer { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metric.postIngest(ingestTimeInMillis); + metric.after(ingestTimeInMillis); if (e != null) { - metric.ingestFailed(); + metric.failed(); handler.accept(null, e); } else { handler.accept(result, null); @@ -148,7 +149,7 @@ public Processor getInnerProcessor() { return processor; } - IngestMetric getMetric() { + OperationMetrics getMetric() { return metric; } diff --git a/server/src/main/java/org/opensearch/ingest/IngestMetric.java b/server/src/main/java/org/opensearch/ingest/IngestMetric.java deleted file mode 100644 index 2d4a1dc9cfdee..0000000000000 --- 
a/server/src/main/java/org/opensearch/ingest/IngestMetric.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.ingest; - -import org.opensearch.common.metrics.CounterMetric; -import org.opensearch.common.metrics.MeanMetric; - -import java.util.concurrent.atomic.AtomicLong; - -/** - *
Metrics to measure ingest actions. - *
This counts measure documents and timings for a given scope. - * The scope is determined by the calling code. For example you can use this class to count all documents across all pipeline, - * or you can use this class to count documents for a given pipeline or a specific processor. - * This class does not make assumptions about it's given scope. - * - * @opensearch.internal - */ -class IngestMetric { - - /** - * The time it takes to complete the measured item. - */ - private final MeanMetric ingestTime = new MeanMetric(); - /** - * The current count of things being measure. Should most likely ever be 0 or 1. - * Useful when aggregating multiple metrics to see how many things are in flight. - */ - private final AtomicLong ingestCurrent = new AtomicLong(); - /** - * The ever increasing count of things being measured - */ - private final CounterMetric ingestCount = new CounterMetric(); - /** - * The only increasing count of failures - */ - private final CounterMetric ingestFailed = new CounterMetric(); - - /** - * Call this prior to the ingest action. - */ - void preIngest() { - ingestCurrent.incrementAndGet(); - } - - /** - * Call this after the performing the ingest action, even if the action failed. - * @param ingestTimeInMillis The time it took to perform the action. - */ - void postIngest(long ingestTimeInMillis) { - ingestCurrent.decrementAndGet(); - ingestTime.inc(ingestTimeInMillis); - ingestCount.inc(); - } - - /** - * Call this if the ingest action failed. - */ - void ingestFailed() { - ingestFailed.inc(); - } - - /** - *
Add two sets of metrics together. - *
Note - this method does not add the current count values. - * The current count value is ephemeral and requires a increase/decrease operation pairs to keep the value correct. - * - * @param metrics The metric to add. - */ - void add(IngestMetric metrics) { - ingestCount.inc(metrics.ingestCount.count()); - ingestTime.inc(metrics.ingestTime.sum()); - ingestFailed.inc(metrics.ingestFailed.count()); - } - - /** - * Creates a serializable representation for these metrics. - */ - IngestStats.Stats createStats() { - return new IngestStats.Stats(ingestCount.count(), ingestTime.sum(), ingestCurrent.get(), ingestFailed.count()); - } -} diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index b9785d9ec036f..0984046ca3077 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -60,6 +60,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.collect.Tuple; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -114,7 +115,7 @@ public class IngestService implements ClusterStateApplier, ReportingService pipelines = Collections.emptyMap(); private final ThreadPool threadPool; - private final IngestMetric totalMetrics = new IngestMetric(); + private final OperationMetrics totalMetrics = new OperationMetrics(); private final List> ingestClusterStateListeners = new CopyOnWriteArrayList<>(); private final ClusterManagerTaskThrottler.ThrottlingKey putPipelineTaskKey; private final ClusterManagerTaskThrottler.ThrottlingKey deletePipelineTaskKey; @@ -440,17 +441,17 @@ Map pipelines() { * Recursive method to obtain all of the non-failure processors for given compoundProcessor. Since conditionals are implemented as * wrappers to the actual processor, always prefer the actual processor's metric over the conditional processor's metric. * @param compoundProcessor The compound processor to start walking the non-failure processors - * @param processorMetrics The list of {@link Processor} {@link IngestMetric} tuples. + * @param processorMetrics The list of {@link Processor} {@link OperationMetrics} tuples. 
* @return the processorMetrics for all non-failure processor that belong to the original compoundProcessor */ - private static List> getProcessorMetrics( + private static List> getProcessorMetrics( CompoundProcessor compoundProcessor, - List> processorMetrics + List> processorMetrics ) { // only surface the top level non-failure processors, on-failure processor times will be included in the top level non-failure - for (Tuple processorWithMetric : compoundProcessor.getProcessorsWithMetrics()) { + for (Tuple processorWithMetric : compoundProcessor.getProcessorsWithMetrics()) { Processor processor = processorWithMetric.v1(); - IngestMetric metric = processorWithMetric.v2(); + OperationMetrics metric = processorWithMetric.v2(); if (processor instanceof CompoundProcessor) { getProcessorMetrics((CompoundProcessor) processor, processorMetrics); } else { @@ -614,7 +615,7 @@ private void executePipelines( if (Objects.equals(originalIndex, newIndex) == false) { if (hasFinalPipeline && it.hasNext() == false) { - totalMetrics.ingestFailed(); + totalMetrics.failed(); onFailure.accept( slot, new IllegalStateException("final pipeline [" + pipelineId + "] can't change the target index") @@ -680,11 +681,11 @@ public IngestStats stats() { Pipeline pipeline = holder.pipeline; CompoundProcessor rootProcessor = pipeline.getCompoundProcessor(); statsBuilder.addPipelineMetrics(id, pipeline.getMetrics()); - List> processorMetrics = new ArrayList<>(); + List> processorMetrics = new ArrayList<>(); getProcessorMetrics(rootProcessor, processorMetrics); processorMetrics.forEach(t -> { Processor processor = t.v1(); - IngestMetric processorMetric = t.v2(); + OperationMetrics processorMetric = t.v2(); statsBuilder.addProcessorMetrics(id, getProcessorName(processor), processor.getType(), processorMetric); }); }); @@ -739,7 +740,7 @@ private void innerExecute( long startTimeInNanos = System.nanoTime(); // the pipeline specific stat holder may not exist and that is fine: // (e.g. the pipeline may have been removed while we're ingesting a document - totalMetrics.preIngest(); + totalMetrics.before(); String index = indexRequest.index(); String id = indexRequest.id(); String routing = indexRequest.routing(); @@ -749,9 +750,9 @@ private void innerExecute( IngestDocument ingestDocument = new IngestDocument(index, id, routing, version, versionType, sourceAsMap); ingestDocument.executePipeline(pipeline, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeInNanos); - totalMetrics.postIngest(ingestTimeInMillis); + totalMetrics.after(ingestTimeInMillis); if (e != null) { - totalMetrics.ingestFailed(); + totalMetrics.failed(); handler.accept(e); } else if (result == null) { itemDroppedHandler.accept(slot); @@ -835,22 +836,22 @@ void innerUpdatePipelines(IngestMetadata newIngestMetadata) { } Pipeline oldPipeline = previous.pipeline; newPipeline.getMetrics().add(oldPipeline.getMetrics()); - List> oldPerProcessMetrics = new ArrayList<>(); - List> newPerProcessMetrics = new ArrayList<>(); + List> oldPerProcessMetrics = new ArrayList<>(); + List> newPerProcessMetrics = new ArrayList<>(); getProcessorMetrics(oldPipeline.getCompoundProcessor(), oldPerProcessMetrics); getProcessorMetrics(newPipeline.getCompoundProcessor(), newPerProcessMetrics); // Best attempt to populate new processor metrics using a parallel array of the old metrics. This is not ideal since // the per processor metrics may get reset when the arrays don't match. 
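Throughout this refactor, IngestMetric call sites (preIngest/postIngest/ingestFailed) become OperationMetrics calls (before/after/failed). OperationMetrics itself lives in org.opensearch.common.metrics and is not shown in this diff; the following is a sketch reconstructed from its call sites and from the deleted IngestMetric, so the exact field names are assumptions:

```java
package org.opensearch.common.metrics;

import java.util.concurrent.atomic.AtomicLong;

/**
 * Sketch of the generalized metrics holder that replaces IngestMetric
 * (reconstructed from call sites; not the verbatim class).
 */
public class OperationMetrics {
    private final MeanMetric time = new MeanMetric();    // sum + count of completed operation times
    private final AtomicLong current = new AtomicLong(); // operations currently in flight
    private final CounterMetric count = new CounterMetric();
    private final CounterMetric failed = new CounterMetric();

    /** Call before the operation starts. */
    public void before() {
        current.incrementAndGet();
    }

    /** Call after the operation completes, whether it succeeded or failed. */
    public void after(long timeInMillis) {
        current.decrementAndGet();
        time.inc(timeInMillis);
        count.inc();
    }

    /** Call additionally when the operation fails. */
    public void failed() {
        failed.inc();
    }

    /** Merge another metric into this one; the in-flight count is ephemeral and not summed. */
    public void add(OperationMetrics other) {
        count.inc(other.count.count());
        time.inc(other.time.sum());
        failed.inc(other.failed.count());
    }

    /** Snapshot into the serializable form used by IngestStats. */
    public OperationStats createStats() {
        return new OperationStats(count.count(), time.sum(), current.get(), failed.count());
    }
}
```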
However, to get to an ideal model, unique and // consistent id's per processor and/or semantic equals for each processor will be needed. if (newPerProcessMetrics.size() == oldPerProcessMetrics.size()) { - Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); - for (Tuple compositeMetric : newPerProcessMetrics) { + Iterator> oldMetricsIterator = oldPerProcessMetrics.iterator(); + for (Tuple compositeMetric : newPerProcessMetrics) { String type = compositeMetric.v1().getType(); - IngestMetric metric = compositeMetric.v2(); + OperationMetrics metric = compositeMetric.v2(); if (oldMetricsIterator.hasNext()) { - Tuple oldCompositeMetric = oldMetricsIterator.next(); + Tuple oldCompositeMetric = oldMetricsIterator.next(); String oldType = oldCompositeMetric.v1().getType(); - IngestMetric oldMetric = oldCompositeMetric.v2(); + OperationMetrics oldMetric = oldCompositeMetric.v2(); if (type.equals(oldType)) { metric.add(oldMetric); } diff --git a/server/src/main/java/org/opensearch/ingest/IngestStats.java b/server/src/main/java/org/opensearch/ingest/IngestStats.java index 0f6209d1c005e..ac06d779bdf2d 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestStats.java +++ b/server/src/main/java/org/opensearch/ingest/IngestStats.java @@ -35,7 +35,8 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.common.metrics.OperationStats; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; @@ -46,15 +47,14 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.TimeUnit; /** - * Stats for an ingest processor pipeline + * OperationStats for an ingest processor pipeline * * @opensearch.internal */ public class IngestStats implements Writeable, ToXContentFragment { - private final Stats totalStats; + private final OperationStats totalStats; private final List pipelineStats; private final Map> processorStats; @@ -64,7 +64,7 @@ public class IngestStats implements Writeable, ToXContentFragment { * @param pipelineStats - The stats for a given ingest pipeline. * @param processorStats - The per-processor stats for a given pipeline. A map keyed by the pipeline identifier. 
*/ - public IngestStats(Stats totalStats, List pipelineStats, Map> processorStats) { + public IngestStats(OperationStats totalStats, List pipelineStats, Map> processorStats) { this.totalStats = totalStats; this.pipelineStats = pipelineStats; this.processorStats = processorStats; @@ -74,13 +74,13 @@ public IngestStats(Stats totalStats, List pipelineStats, Map(size); this.processorStats = new HashMap<>(size); for (int i = 0; i < size; i++) { String pipelineId = in.readString(); - Stats pipelineStat = new Stats(in); + OperationStats pipelineStat = new OperationStats(in); this.pipelineStats.add(new PipelineStat(pipelineId, pipelineStat)); int processorsSize = in.readVInt(); List processorStatsPerPipeline = new ArrayList<>(processorsSize); @@ -88,7 +88,7 @@ public IngestStats(StreamInput in) throws IOException { String processorName = in.readString(); String processorType = "_NOT_AVAILABLE"; processorType = in.readString(); - Stats processorStat = new Stats(in); + OperationStats processorStat = new OperationStats(in); processorStatsPerPipeline.add(new ProcessorStat(processorName, processorType, processorStat)); } this.processorStats.put(pipelineId, processorStatsPerPipeline); @@ -148,7 +148,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - public Stats getTotalStats() { + public OperationStats getTotalStats() { return totalStats; } @@ -176,115 +176,24 @@ public int hashCode() { } /** - * The ingest statistics. - * - * @opensearch.internal - */ - public static class Stats implements Writeable, ToXContentFragment { - - private final long ingestCount; - private final long ingestTimeInMillis; - private final long ingestCurrent; - private final long ingestFailedCount; - - public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) { - this.ingestCount = ingestCount; - this.ingestTimeInMillis = ingestTimeInMillis; - this.ingestCurrent = ingestCurrent; - this.ingestFailedCount = ingestFailedCount; - } - - /** - * Read from a stream. - */ - public Stats(StreamInput in) throws IOException { - ingestCount = in.readVLong(); - ingestTimeInMillis = in.readVLong(); - ingestCurrent = in.readVLong(); - ingestFailedCount = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(ingestCount); - out.writeVLong(ingestTimeInMillis); - out.writeVLong(ingestCurrent); - out.writeVLong(ingestFailedCount); - } - - /** - * @return The total number of executed ingest preprocessing operations. - */ - public long getIngestCount() { - return ingestCount; - } - - /** - * @return The total time spent of ingest preprocessing in millis. - */ - public long getIngestTimeInMillis() { - return ingestTimeInMillis; - } - - /** - * @return The total number of ingest preprocessing operations currently executing. - */ - public long getIngestCurrent() { - return ingestCurrent; - } - - /** - * @return The total number of ingest preprocessing operations that have failed. 
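The deleted accessors here map one-to-one onto the new OperationStats returned by OperationMetrics#createStats(). A sketch of that class, assuming it mirrors the deleted IngestStats.Stats (same four-VLong wire format and the same count/time_in_millis/current/failed XContent fields):

```java
package org.opensearch.common.metrics;

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.core.xcontent.ToXContentFragment;
import org.opensearch.core.xcontent.XContentBuilder;

// Assumed shape of the serializable stats object replacing IngestStats.Stats.
public class OperationStats implements Writeable, ToXContentFragment {
    private final long count;
    private final long totalTimeInMillis;
    private final long current;
    private final long failedCount;

    public OperationStats(long count, long totalTimeInMillis, long current, long failedCount) {
        this.count = count;
        this.totalTimeInMillis = totalTimeInMillis;
        this.current = current;
        this.failedCount = failedCount;
    }

    /** Read from a stream, preserving the old four-VLong layout. */
    public OperationStats(StreamInput in) throws IOException {
        count = in.readVLong();
        totalTimeInMillis = in.readVLong();
        current = in.readVLong();
        failedCount = in.readVLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(count);
        out.writeVLong(totalTimeInMillis);
        out.writeVLong(current);
        out.writeVLong(failedCount);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.field("count", count)
            .humanReadableField("time_in_millis", "time", new TimeValue(totalTimeInMillis, TimeUnit.MILLISECONDS))
            .field("current", current)
            .field("failed", failedCount);
    }
}
```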
- */ - public long getIngestFailedCount() { - return ingestFailedCount; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("count", ingestCount); - builder.humanReadableField("time_in_millis", "time", new TimeValue(ingestTimeInMillis, TimeUnit.MILLISECONDS)); - builder.field("current", ingestCurrent); - builder.field("failed", ingestFailedCount); - return builder; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - IngestStats.Stats that = (IngestStats.Stats) o; - return Objects.equals(ingestCount, that.ingestCount) - && Objects.equals(ingestTimeInMillis, that.ingestTimeInMillis) - && Objects.equals(ingestFailedCount, that.ingestFailedCount) - && Objects.equals(ingestCurrent, that.ingestCurrent); - } - - @Override - public int hashCode() { - return Objects.hash(ingestCount, ingestTimeInMillis, ingestFailedCount, ingestCurrent); - } - } - - /** - * Easy conversion from scoped {@link IngestMetric} objects to a serializable Stats objects + * Easy conversion from scoped {@link OperationMetrics} objects to a serializable OperationStats objects */ static class Builder { - private Stats totalStats; + private OperationStats totalStats; private List pipelineStats = new ArrayList<>(); private Map> processorStats = new HashMap<>(); - Builder addTotalMetrics(IngestMetric totalMetric) { + Builder addTotalMetrics(OperationMetrics totalMetric) { this.totalStats = totalMetric.createStats(); return this; } - Builder addPipelineMetrics(String pipelineId, IngestMetric pipelineMetric) { + Builder addPipelineMetrics(String pipelineId, OperationMetrics pipelineMetric) { this.pipelineStats.add(new PipelineStat(pipelineId, pipelineMetric.createStats())); return this; } - Builder addProcessorMetrics(String pipelineId, String processorName, String processorType, IngestMetric metric) { + Builder addProcessorMetrics(String pipelineId, String processorName, String processorType, OperationMetrics metric) { this.processorStats.computeIfAbsent(pipelineId, k -> new ArrayList<>()) .add(new ProcessorStat(processorName, processorType, metric.createStats())); return this; @@ -300,9 +209,9 @@ IngestStats build() { */ public static class PipelineStat { private final String pipelineId; - private final Stats stats; + private final OperationStats stats; - public PipelineStat(String pipelineId, Stats stats) { + public PipelineStat(String pipelineId, OperationStats stats) { this.pipelineId = pipelineId; this.stats = stats; } @@ -311,7 +220,7 @@ public String getPipelineId() { return pipelineId; } - public Stats getStats() { + public OperationStats getStats() { return stats; } @@ -335,9 +244,9 @@ public int hashCode() { public static class ProcessorStat { private final String name; private final String type; - private final Stats stats; + private final OperationStats stats; - public ProcessorStat(String name, String type, Stats stats) { + public ProcessorStat(String name, String type, OperationStats stats) { this.name = name; this.type = type; this.stats = stats; @@ -351,7 +260,7 @@ public String getType() { return type; } - public Stats getStats() { + public OperationStats getStats() { return stats; } diff --git a/server/src/main/java/org/opensearch/ingest/Pipeline.java b/server/src/main/java/org/opensearch/ingest/Pipeline.java index 9b3725fd65d9d..766fb9cd66777 100644 --- a/server/src/main/java/org/opensearch/ingest/Pipeline.java +++ 
b/server/src/main/java/org/opensearch/ingest/Pipeline.java @@ -43,6 +43,7 @@ import java.util.function.BiConsumer; import java.util.function.LongSupplier; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.script.ScriptService; /** @@ -63,7 +64,7 @@ public final class Pipeline { @Nullable private final Integer version; private final CompoundProcessor compoundProcessor; - private final IngestMetric metrics; + private final OperationMetrics metrics; private final LongSupplier relativeTimeProvider; public Pipeline(String id, @Nullable String description, @Nullable Integer version, CompoundProcessor compoundProcessor) { @@ -82,7 +83,7 @@ public Pipeline(String id, @Nullable String description, @Nullable Integer versi this.description = description; this.compoundProcessor = compoundProcessor; this.version = version; - this.metrics = new IngestMetric(); + this.metrics = new OperationMetrics(); this.relativeTimeProvider = relativeTimeProvider; } @@ -129,12 +130,12 @@ public static Pipeline create( */ public void execute(IngestDocument ingestDocument, BiConsumer handler) { final long startTimeInNanos = relativeTimeProvider.getAsLong(); - metrics.preIngest(); + metrics.before(); compoundProcessor.execute(ingestDocument, (result, e) -> { long ingestTimeInMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startTimeInNanos); - metrics.postIngest(ingestTimeInMillis); + metrics.after(ingestTimeInMillis); if (e != null) { - metrics.ingestFailed(); + metrics.failed(); } handler.accept(result, e); }); @@ -198,7 +199,7 @@ public List flattenAllProcessors() { /** * The metrics associated with this pipeline. */ - public IngestMetric getMetrics() { + public OperationMetrics getMetrics() { return metrics; } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 688f2d05b203b..3742c817118da 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -56,6 +56,8 @@ import org.opensearch.monitor.fs.FsProbe; import org.opensearch.plugins.ExtensionAwarePlugin; import org.opensearch.plugins.SearchPipelinePlugin; +import org.opensearch.telemetry.tracing.NoopTracerFactory; +import org.opensearch.telemetry.tracing.TracerFactory; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.search.pipeline.SearchPipelineService; @@ -65,6 +67,8 @@ import org.opensearch.tasks.consumer.TopNSearchTasksLogger; import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; +import org.opensearch.telemetry.TelemetryModule; +import org.opensearch.telemetry.TelemetrySettings; import org.opensearch.watcher.ResourceWatcherService; import org.opensearch.core.Assertions; import org.opensearch.Build; @@ -194,6 +198,7 @@ import org.opensearch.plugins.ScriptPlugin; import org.opensearch.plugins.SearchPlugin; import org.opensearch.plugins.SystemIndexPlugin; +import org.opensearch.plugins.TelemetryPlugin; import org.opensearch.repositories.RepositoriesModule; import org.opensearch.repositories.RepositoriesService; import org.opensearch.rest.RestController; @@ -254,6 +259,7 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.SEARCH_PIPELINE; +import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static 
org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; @@ -373,6 +379,7 @@ public static class DiscoverySettings { private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; private final NodeService nodeService; + private final TracerFactory tracerFactory; final NamedWriteableRegistry namedWriteableRegistry; private final AtomicReference runnableTaskListener; private FileCache fileCache; @@ -1021,6 +1028,16 @@ protected Node( searchModule.getIndexSearcherExecutor(threadPool) ); + if (FeatureFlags.isEnabled(TELEMETRY)) { + final TelemetrySettings telemetrySettings = new TelemetrySettings(settings, clusterService.getClusterSettings()); + List telemetryPlugins = pluginsService.filterPlugins(TelemetryPlugin.class); + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, telemetrySettings); + tracerFactory = new TracerFactory(telemetrySettings, telemetryModule.getTelemetry(), threadPool.getThreadContext()); + } else { + tracerFactory = new NoopTracerFactory(); + } + resourcesToClose.add(tracerFactory::close); + final List> tasksExecutors = pluginsService.filterPlugins(PersistentTaskPlugin.class) .stream() .map( @@ -1126,6 +1143,7 @@ protected Node( b.bind(FsHealthService.class).toInstance(fsHealthService); b.bind(SystemIndices.class).toInstance(systemIndices); b.bind(IdentityService.class).toInstance(identityService); + b.bind(TracerFactory.class).toInstance(this.tracerFactory); }); injector = modules.createInjector(); @@ -1481,6 +1499,9 @@ public synchronized void close() throws IOException { toClose.add(() -> stopWatch.stop().start("node_environment")); toClose.add(injector.getInstance(NodeEnvironment.class)); toClose.add(stopWatch::stop); + if (FeatureFlags.isEnabled(TELEMETRY)) { + toClose.add(() -> injector.getInstance(TracerFactory.class)); + } if (logger.isTraceEnabled()) { toClose.add(() -> logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint())); diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java index 9382746081c18..6f4fe1e083ad7 100644 --- a/server/src/main/java/org/opensearch/node/NodeService.java +++ b/server/src/main/java/org/opensearch/node/NodeService.java @@ -216,7 +216,8 @@ public NodeStats stats( boolean clusterManagerThrottling, boolean weightedRoutingStats, boolean fileCacheStats, - boolean taskCancellation + boolean taskCancellation, + boolean searchPipelineStats ) { // for indices stats we want to include previous allocated shards stats as well (it will // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats) @@ -243,7 +244,8 @@ public NodeStats stats( clusterManagerThrottling ? this.clusterService.getClusterManagerService().getThrottlingStats() : null, weightedRoutingStats ? WeightedRoutingStats.getInstance() : null, fileCacheStats && fileCache != null ? fileCache.fileCacheStats() : null, - taskCancellation ? this.taskCancellationMonitoringService.stats() : null + taskCancellation ? this.taskCancellationMonitoringService.stats() : null, + searchPipelineStats ? 
this.searchPipelineService.stats() : null ); } diff --git a/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java new file mode 100644 index 0000000000000..33dc9b7a0c843 --- /dev/null +++ b/server/src/main/java/org/opensearch/plugins/TelemetryPlugin.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.plugins; + +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; + +import java.util.Optional; + +/** + * Plugin for extending telemetry related classes + */ +public interface TelemetryPlugin { + + Optional getTelemetry(TelemetrySettings settings); + + String getName(); + +} diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 88e14a4dff3a0..b108e2da1ab04 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -45,6 +45,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; @@ -219,6 +220,15 @@ public void restoreShard( in.restoreShard(store, snapshotId, indexId, snapshotShardId, recoveryState, listener); } + @Override + public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + return in.getRemoteStoreShallowCopyShardMetadata(snapshotId, indexId, snapshotShardId); + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { return in.getShardSnapshotStatus(snapshotId, indexId, shardId); diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 045b7ad348a76..c08369b79452d 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -46,6 +46,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.shard.ShardId; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; @@ -304,6 +305,22 @@ void restoreShard( ActionListener listener ); + /** + * Returns Snapshot Shard Metadata for remote store interop enabled snapshot. + *
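The new TelemetryPlugin interface above is deliberately small: a plugin supplies at most one Telemetry plus a name. A hypothetical implementation, shown only to illustrate the contract; the class name and the Optional.empty() body are placeholders, not part of this change:

```java
import java.util.Optional;

import org.opensearch.plugins.Plugin;
import org.opensearch.plugins.TelemetryPlugin;
import org.opensearch.telemetry.Telemetry;
import org.opensearch.telemetry.TelemetrySettings;

public class ExampleTelemetryPlugin extends Plugin implements TelemetryPlugin {

    @Override
    public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
        // A real plugin would construct and return its Telemetry implementation here;
        // returning empty keeps this placeholder compilable without one.
        return Optional.empty();
    }

    @Override
    public String getName() {
        return "example-telemetry";
    }
}
```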

+ * The index can be renamed on restore, hence different {@code shardId} and {@code snapshotShardId} are supplied. + * @param snapshotId snapshot id + * @param indexId id of the index in the repository from which the restore is occurring + * @param snapshotShardId shard id (in the snapshot) + */ + default RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + throw new UnsupportedOperationException(); + } + /** * Retrieve shard snapshot status for the stored snapshot * diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c0d6f49a5ce0d..f04bf83c2f1d1 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -2789,6 +2789,16 @@ public InputStream maybeRateLimitSnapshots(InputStream stream) { return maybeRateLimit(stream, () -> snapshotRateLimiter, snapshotRateLimitingTimeInNanos); } + @Override + public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata( + SnapshotId snapshotId, + IndexId indexId, + ShardId snapshotShardId + ) { + final BlobContainer container = shardContainer(indexId, snapshotShardId); + return loadShallowCopyShardSnapshot(container, snapshotId); + } + @Override public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { BlobStoreIndexShardSnapshot snapshot = loadShardSnapshot(shardContainer(indexId, shardId), snapshotId); diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java index 6b511587cc271..a55b7d6bc154e 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/LongKeyedBucketOrds.java @@ -33,7 +33,7 @@ package org.opensearch.search.aggregations.bucket.terms; import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.LongHash; +import org.opensearch.common.util.ReorganizingLongHash; import org.opensearch.common.util.LongLongHash; import org.opensearch.common.lease.Releasable; import org.opensearch.search.aggregations.CardinalityUpperBound; @@ -148,10 +148,10 @@ public long value() { * @opensearch.internal */ public static class FromSingle extends LongKeyedBucketOrds { - private final LongHash ords; + private final ReorganizingLongHash ords; public FromSingle(BigArrays bigArrays) { - ords = new LongHash(1, bigArrays); + ords = new ReorganizingLongHash(bigArrays); } @Override diff --git a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java index c9a5f865d507e..6f44daf48ed21 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Pipeline.java @@ -8,7 +8,6 @@ package org.opensearch.search.pipeline; -import org.opensearch.OpenSearchParseException; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Nullable; @@ -16,17 +15,11 @@ import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import 
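The getRemoteStoreShallowCopyShardMetadata addition to Repository above uses a throwing default method, so the interface gains a capability without breaking third-party repository implementations; only repositories that actually support shallow copies (BlobStoreRepository, above) override it. The idiom in isolation, with purely illustrative names:

```java
// Illustrative only; these names are not part of the OpenSearch API.
interface MetadataSource {
    String readMetadata(String key);

    // Added after the interface shipped: existing implementations keep compiling
    // and fail loudly only if a caller exercises the new capability.
    default String readShallowCopyMetadata(String key) {
        throw new UnsupportedOperationException();
    }
}
```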
org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.ingest.ConfigurationUtils; -import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.Map; - -import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; -import static org.opensearch.ingest.Pipeline.DESCRIPTION_KEY; -import static org.opensearch.ingest.Pipeline.VERSION_KEY; +import java.util.concurrent.TimeUnit; +import java.util.function.LongSupplier; /** * Concrete representation of a search pipeline, holding multiple processors. @@ -45,73 +38,24 @@ class Pipeline { private final List searchResponseProcessors; private final NamedWriteableRegistry namedWriteableRegistry; + private final LongSupplier relativeTimeSupplier; - private Pipeline( + Pipeline( String id, @Nullable String description, @Nullable Integer version, List requestProcessors, List responseProcessors, - NamedWriteableRegistry namedWriteableRegistry + NamedWriteableRegistry namedWriteableRegistry, + LongSupplier relativeTimeSupplier ) { this.id = id; this.description = description; this.version = version; - this.searchRequestProcessors = requestProcessors; - this.searchResponseProcessors = responseProcessors; + this.searchRequestProcessors = Collections.unmodifiableList(requestProcessors); + this.searchResponseProcessors = Collections.unmodifiableList(responseProcessors); this.namedWriteableRegistry = namedWriteableRegistry; - } - - static Pipeline create( - String id, - Map config, - Map> requestProcessorFactories, - Map> responseProcessorFactories, - NamedWriteableRegistry namedWriteableRegistry - ) throws Exception { - String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); - Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); - List> requestProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, REQUEST_PROCESSORS_KEY); - List requestProcessors = readProcessors(requestProcessorFactories, requestProcessorConfigs); - List> responseProcessorConfigs = ConfigurationUtils.readOptionalList( - null, - null, - config, - RESPONSE_PROCESSORS_KEY - ); - List responseProcessors = readProcessors(responseProcessorFactories, responseProcessorConfigs); - if (config.isEmpty() == false) { - throw new OpenSearchParseException( - "pipeline [" - + id - + "] doesn't support one or more provided configuration parameters " - + Arrays.toString(config.keySet().toArray()) - ); - } - return new Pipeline(id, description, version, requestProcessors, responseProcessors, namedWriteableRegistry); - } - - private static List readProcessors( - Map> processorFactories, - List> requestProcessorConfigs - ) throws Exception { - List processors = new ArrayList<>(); - if (requestProcessorConfigs == null) { - return processors; - } - for (Map processorConfigWithKey : requestProcessorConfigs) { - for (Map.Entry entry : processorConfigWithKey.entrySet()) { - String type = entry.getKey(); - if (!processorFactories.containsKey(type)) { - throw new IllegalArgumentException("Invalid processor type " + type); - } - Map config = (Map) entry.getValue(); - String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); - String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY); - processors.add(processorFactories.get(type).create(processorFactories, tag, description, config)); - } - } - return 
Collections.unmodifiableList(processors); + this.relativeTimeSupplier = relativeTimeSupplier; } String getId() { @@ -134,32 +78,94 @@ List getSearchResponseProcessors() { return searchResponseProcessors; } - SearchRequest transformRequest(SearchRequest request) throws Exception { + protected void beforeTransformRequest() {} + + protected void afterTransformRequest(long timeInNanos) {} + + protected void onTransformRequestFailure() {} + + protected void beforeRequestProcessor(Processor processor) {} + + protected void afterRequestProcessor(Processor processor, long timeInNanos) {} + + protected void onRequestProcessorFailed(Processor processor) {} + + protected void beforeTransformResponse() {} + + protected void afterTransformResponse(long timeInNanos) {} + + protected void onTransformResponseFailure() {} + + protected void beforeResponseProcessor(Processor processor) {} + + protected void afterResponseProcessor(Processor processor, long timeInNanos) {} + + protected void onResponseProcessorFailed(Processor processor) {} + + SearchRequest transformRequest(SearchRequest request) throws SearchPipelineProcessingException { if (searchRequestProcessors.isEmpty() == false) { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - request.writeTo(bytesStreamOutput); - try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { - try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { - request = new SearchRequest(input); + long pipelineStart = relativeTimeSupplier.getAsLong(); + beforeTransformRequest(); + try { + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + request.writeTo(bytesStreamOutput); + try (StreamInput in = bytesStreamOutput.bytes().streamInput()) { + try (StreamInput input = new NamedWriteableAwareStreamInput(in, namedWriteableRegistry)) { + request = new SearchRequest(input); + } } } - } - for (SearchRequestProcessor searchRequestProcessor : searchRequestProcessors) { - request = searchRequestProcessor.processRequest(request); + for (SearchRequestProcessor processor : searchRequestProcessors) { + beforeRequestProcessor(processor); + long start = relativeTimeSupplier.getAsLong(); + try { + request = processor.processRequest(request); + } catch (Exception e) { + onRequestProcessorFailed(processor); + throw e; + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterRequestProcessor(processor, took); + } + } + } catch (Exception e) { + onTransformRequestFailure(); + throw new SearchPipelineProcessingException(e); + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformRequest(took); } } return request; } SearchResponse transformResponse(SearchRequest request, SearchResponse response) throws SearchPipelineProcessingException { - try { - for (SearchResponseProcessor responseProcessor : searchResponseProcessors) { - response = responseProcessor.processResponse(request, response); + if (searchResponseProcessors.isEmpty() == false) { + long pipelineStart = relativeTimeSupplier.getAsLong(); + beforeTransformResponse(); + try { + for (SearchResponseProcessor processor : searchResponseProcessors) { + beforeResponseProcessor(processor); + long start = relativeTimeSupplier.getAsLong(); + try { + response = processor.processResponse(request, response); + } catch (Exception e) { + onResponseProcessorFailed(processor); + throw e; + } finally { + long took = 
TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - start); + afterResponseProcessor(processor, took); + } + } + } catch (Exception e) { + onTransformResponseFailure(); + throw new SearchPipelineProcessingException(e); + } finally { + long took = TimeUnit.NANOSECONDS.toMillis(relativeTimeSupplier.getAsLong() - pipelineStart); + afterTransformResponse(took); } - return response; - } catch (Exception e) { - throw new SearchPipelineProcessingException(e); } + return response; } static final Pipeline NO_OP_PIPELINE = new Pipeline( @@ -168,6 +174,8 @@ SearchResponse transformResponse(SearchRequest request, SearchResponse response) 0, Collections.emptyList(), Collections.emptyList(), - null + null, + () -> 0L ); + } diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java new file mode 100644 index 0000000000000..662473f190006 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java @@ -0,0 +1,227 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.OpenSearchParseException; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.ingest.ConfigurationUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.LongSupplier; + +import static org.opensearch.ingest.ConfigurationUtils.TAG_KEY; +import static org.opensearch.ingest.Pipeline.DESCRIPTION_KEY; +import static org.opensearch.ingest.Pipeline.VERSION_KEY; + +/** + * Specialization of {@link Pipeline} that adds metrics to track executions of the pipeline and individual processors. 
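transformRequest above deep-copies the incoming SearchRequest by round-tripping it through the stream layer before any processor runs, so processors can mutate the copy freely while the caller's original stays untouched. The same trick in isolation, built from the stream classes this diff already uses:

```java
import java.io.IOException;

import org.opensearch.action.search.SearchRequest;
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.io.stream.StreamInput;

final class SearchRequestCopier {
    // Serialize-then-deserialize yields an independent deep copy of the request.
    static SearchRequest deepCopy(SearchRequest request, NamedWriteableRegistry registry) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            request.writeTo(out);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                return new SearchRequest(in);
            }
        }
    }
}
```

Copying costs one serialization pass per pipelined request, but it keeps all mutation local to the pipeline.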
+ */ +class PipelineWithMetrics extends Pipeline { + + private final OperationMetrics totalRequestMetrics; + private final OperationMetrics totalResponseMetrics; + private final OperationMetrics pipelineRequestMetrics = new OperationMetrics(); + private final OperationMetrics pipelineResponseMetrics = new OperationMetrics(); + private final Map requestProcessorMetrics = new HashMap<>(); + private final Map responseProcessorMetrics = new HashMap<>(); + + PipelineWithMetrics( + String id, + String description, + Integer version, + List requestProcessors, + List responseProcessors, + NamedWriteableRegistry namedWriteableRegistry, + OperationMetrics totalRequestMetrics, + OperationMetrics totalResponseMetrics, + LongSupplier relativeTimeSupplier + ) { + super(id, description, version, requestProcessors, responseProcessors, namedWriteableRegistry, relativeTimeSupplier); + this.totalRequestMetrics = totalRequestMetrics; + this.totalResponseMetrics = totalResponseMetrics; + for (Processor requestProcessor : getSearchRequestProcessors()) { + requestProcessorMetrics.putIfAbsent(getProcessorKey(requestProcessor), new OperationMetrics()); + } + for (Processor responseProcessor : getSearchResponseProcessors()) { + responseProcessorMetrics.putIfAbsent(getProcessorKey(responseProcessor), new OperationMetrics()); + } + } + + static PipelineWithMetrics create( + String id, + Map config, + Map> requestProcessorFactories, + Map> responseProcessorFactories, + NamedWriteableRegistry namedWriteableRegistry, + OperationMetrics totalRequestProcessingMetrics, + OperationMetrics totalResponseProcessingMetrics + ) throws Exception { + String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); + Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); + List> requestProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, REQUEST_PROCESSORS_KEY); + List requestProcessors = readProcessors(requestProcessorFactories, requestProcessorConfigs); + List> responseProcessorConfigs = ConfigurationUtils.readOptionalList( + null, + null, + config, + RESPONSE_PROCESSORS_KEY + ); + List responseProcessors = readProcessors(responseProcessorFactories, responseProcessorConfigs); + if (config.isEmpty() == false) { + throw new OpenSearchParseException( + "pipeline [" + + id + + "] doesn't support one or more provided configuration parameters " + + Arrays.toString(config.keySet().toArray()) + ); + } + return new PipelineWithMetrics( + id, + description, + version, + requestProcessors, + responseProcessors, + namedWriteableRegistry, + totalRequestProcessingMetrics, + totalResponseProcessingMetrics, + System::nanoTime + ); + + } + + private static List readProcessors( + Map> processorFactories, + List> requestProcessorConfigs + ) throws Exception { + List processors = new ArrayList<>(); + if (requestProcessorConfigs == null) { + return processors; + } + for (Map processorConfigWithKey : requestProcessorConfigs) { + for (Map.Entry entry : processorConfigWithKey.entrySet()) { + String type = entry.getKey(); + if (!processorFactories.containsKey(type)) { + throw new IllegalArgumentException("Invalid processor type " + type); + } + Map config = (Map) entry.getValue(); + String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); + String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY); + processors.add(processorFactories.get(type).create(processorFactories, tag, 
description, config)); + } + } + return Collections.unmodifiableList(processors); + } + + @Override + protected void beforeTransformRequest() { + super.beforeTransformRequest(); + totalRequestMetrics.before(); + pipelineRequestMetrics.before(); + } + + @Override + protected void afterTransformRequest(long timeInNanos) { + super.afterTransformRequest(timeInNanos); + totalRequestMetrics.after(timeInNanos); + pipelineRequestMetrics.after(timeInNanos); + } + + @Override + protected void onTransformRequestFailure() { + super.onTransformRequestFailure(); + totalRequestMetrics.failed(); + pipelineRequestMetrics.failed(); + } + + protected void beforeRequestProcessor(Processor processor) { + requestProcessorMetrics.get(getProcessorKey(processor)).before(); + } + + protected void afterRequestProcessor(Processor processor, long timeInNanos) { + requestProcessorMetrics.get(getProcessorKey(processor)).after(timeInNanos); + } + + protected void onRequestProcessorFailed(Processor processor) { + requestProcessorMetrics.get(getProcessorKey(processor)).failed(); + } + + protected void beforeTransformResponse() { + super.beforeTransformResponse(); + totalResponseMetrics.before(); + pipelineResponseMetrics.before(); + } + + protected void afterTransformResponse(long timeInNanos) { + super.afterTransformResponse(timeInNanos); + totalResponseMetrics.after(timeInNanos); + pipelineResponseMetrics.after(timeInNanos); + } + + protected void onTransformResponseFailure() { + super.onTransformResponseFailure(); + totalResponseMetrics.failed(); + pipelineResponseMetrics.failed(); + } + + protected void beforeResponseProcessor(Processor processor) { + responseProcessorMetrics.get(getProcessorKey(processor)).before(); + } + + protected void afterResponseProcessor(Processor processor, long timeInNanos) { + responseProcessorMetrics.get(getProcessorKey(processor)).after(timeInNanos); + } + + protected void onResponseProcessorFailed(Processor processor) { + responseProcessorMetrics.get(getProcessorKey(processor)).failed(); + } + + void copyMetrics(PipelineWithMetrics oldPipeline) { + pipelineRequestMetrics.add(oldPipeline.pipelineRequestMetrics); + pipelineResponseMetrics.add(oldPipeline.pipelineResponseMetrics); + copyProcessorMetrics(requestProcessorMetrics, oldPipeline.requestProcessorMetrics); + copyProcessorMetrics(responseProcessorMetrics, oldPipeline.responseProcessorMetrics); + } + + private static void copyProcessorMetrics( + Map<String, OperationMetrics> newProcessorMetrics, + Map<String, OperationMetrics> oldProcessorMetrics + ) { + for (Map.Entry<String, OperationMetrics> oldProcessorMetric : oldProcessorMetrics.entrySet()) { + if (newProcessorMetrics.containsKey(oldProcessorMetric.getKey())) { + newProcessorMetrics.get(oldProcessorMetric.getKey()).add(oldProcessorMetric.getValue()); + } + } + } + + private static String getProcessorKey(Processor processor) { + String key = processor.getType(); + if (processor.getTag() != null) { + return key + ":" + processor.getTag(); + } + return key; + } + + void populateStats(SearchPipelineStats.Builder statsBuilder) { + statsBuilder.addPipelineStats(getId(), pipelineRequestMetrics, pipelineResponseMetrics); + for (Processor processor : getSearchRequestProcessors()) { + String key = getProcessorKey(processor); + statsBuilder.addRequestProcessorStats(getId(), key, processor.getType(), requestProcessorMetrics.get(key)); + } + for (Processor processor : getSearchResponseProcessors()) { + String key = getProcessorKey(processor); + statsBuilder.addResponseProcessorStats(getId(), key, processor.getType(), responseProcessorMetrics.get(key)); + } + } +} diff
--git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 87c09bd971284..434c8fbfacc74 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -30,6 +30,7 @@ import org.opensearch.cluster.service.ClusterManagerTaskThrottler; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.metrics.OperationMetrics; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -80,6 +81,9 @@ public class SearchPipelineService implements ClusterStateApplier, ReportingServ private final NamedWriteableRegistry namedWriteableRegistry; private volatile ClusterState state; + private final OperationMetrics totalRequestProcessingMetrics = new OperationMetrics(); + private final OperationMetrics totalResponseProcessingMetrics = new OperationMetrics(); + private final boolean isEnabled; public SearchPipelineService( @@ -172,26 +176,26 @@ void innerUpdatePipelines(SearchPipelineMetadata newSearchPipelineMetadata) { newPipelines = new HashMap<>(existingPipelines); } try { - Pipeline newPipeline = Pipeline.create( + PipelineWithMetrics newPipeline = PipelineWithMetrics.create( newConfiguration.getId(), newConfiguration.getConfigAsMap(), requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + namedWriteableRegistry, + totalRequestProcessingMetrics, + totalResponseProcessingMetrics ); newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, newPipeline)); - if (previous == null) { - continue; + if (previous != null) { + newPipeline.copyMetrics(previous.pipeline); } - // TODO -- once we add in pipeline metrics (like in ingest pipelines), we will need to deep-copy - // the old pipeline's metrics into the new pipeline. 
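With copyMetrics in place, innerUpdatePipelines above can drop the old TODO: when a cluster-state update recreates a pipeline, counters from the old instance are folded into the new one, so stats survive configuration changes. The merge is keyed by the processor's type:tag string and silently skips processors that no longer exist, roughly like this simplified sketch:

```java
import java.util.Map;

final class MetricsCopyForward {
    // Simplified stand-in for copyProcessorMetrics: accumulate old counters into the
    // new map, but only for processor keys that still exist after the update.
    static void copyForward(Map<String, Long> newCounters, Map<String, Long> oldCounters) {
        oldCounters.forEach((key, oldValue) -> newCounters.computeIfPresent(key, (k, current) -> current + oldValue));
    }
}
```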
} catch (Exception e) { OpenSearchParseException parseException = new OpenSearchParseException( "Error updating pipeline with id [" + newConfiguration.getId() + "]", e ); - // TODO -- replace pipeline with one that throws an exception when we try to use it + // TODO -- replace pipeline with one that throws this exception when we try to use it if (exceptions == null) { exceptions = new ArrayList<>(); } @@ -271,12 +275,14 @@ void validatePipeline(Map searchPipelineInfos throw new IllegalStateException("Search pipeline info is empty"); } Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2(); - Pipeline pipeline = Pipeline.create( + Pipeline pipeline = PipelineWithMetrics.create( request.getId(), pipelineConfig, requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + namedWriteableRegistry, + new OperationMetrics(), // Use ephemeral metrics for validation + new OperationMetrics() ); List exceptions = new ArrayList<>(); for (SearchRequestProcessor processor : pipeline.getSearchRequestProcessors()) { @@ -367,12 +373,14 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) throws Exce ); } try { - pipeline = Pipeline.create( + pipeline = PipelineWithMetrics.create( AD_HOC_PIPELINE_ID, searchRequest.source().searchPipelineSource(), requestProcessorFactories, responseProcessorFactories, - namedWriteableRegistry + namedWriteableRegistry, + totalRequestProcessingMetrics, + totalResponseProcessingMetrics ); } catch (Exception e) { throw new SearchPipelineProcessingException(e); @@ -400,12 +408,8 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) throws Exce pipeline = pipelineHolder.pipeline; } } - try { - SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); - return new PipelinedRequest(pipeline, transformedRequest); - } catch (Exception e) { - throw new SearchPipelineProcessingException(e); - } + SearchRequest transformedRequest = pipeline.transformRequest(searchRequest); + return new PipelinedRequest(pipeline, transformedRequest); } Map> getRequestProcessorFactories() { @@ -431,6 +435,16 @@ public SearchPipelineInfo info() { ); } + public SearchPipelineStats stats() { + SearchPipelineStats.Builder builder = new SearchPipelineStats.Builder(); + builder.withTotalStats(totalRequestProcessingMetrics, totalResponseProcessingMetrics); + for (PipelineHolder pipelineHolder : pipelines.values()) { + PipelineWithMetrics pipeline = pipelineHolder.pipeline; + pipeline.populateStats(builder); + } + return builder.build(); + } + public static List getPipelines(ClusterState clusterState, String... 
ids) { SearchPipelineMetadata metadata = clusterState.getMetadata().custom(SearchPipelineMetadata.TYPE); return innerGetPipelines(metadata, ids); @@ -474,9 +488,9 @@ Map getPipelines() { static class PipelineHolder { final PipelineConfiguration configuration; - final Pipeline pipeline; + final PipelineWithMetrics pipeline; - PipelineHolder(PipelineConfiguration configuration, Pipeline pipeline) { + PipelineHolder(PipelineConfiguration configuration, PipelineWithMetrics pipeline) { this.configuration = Objects.requireNonNull(configuration); this.pipeline = Objects.requireNonNull(pipeline); } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java new file mode 100644 index 0000000000000..4261bfe99160a --- /dev/null +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineStats.java @@ -0,0 +1,367 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.pipeline; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.metrics.OperationMetrics; +import org.opensearch.common.metrics.OperationStats; +import org.opensearch.core.xcontent.ToXContentFragment; +import org.opensearch.core.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.TreeMap; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; + +/** + * Serializable, immutable search pipeline statistics to be returned via stats APIs. 
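The stream constructor and writeTo implementation just below follow OpenSearch's usual length-prefixed wire format: write a VInt element count, then each element, and read them back in exactly the same order. The same pattern with plain JDK streams, purely as an illustration (StreamOutput's variable-length ints and richer types are not reproduced here):

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class WireListDemo {
    static void write(DataOutputStream out, List<String> values) throws IOException {
        out.writeInt(values.size());      // StreamOutput#writeVInt in OpenSearch
        for (String value : values) {
            out.writeUTF(value);          // elements in a fixed, documented order
        }
    }

    static List<String> read(DataInputStream in) throws IOException {
        int size = in.readInt();          // must mirror the write side exactly
        List<String> values = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            values.add(in.readUTF());
        }
        return values;
    }
}
```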
+ * + * @opensearch.internal + */ +public class SearchPipelineStats implements Writeable, ToXContentFragment { + + private final OperationStats totalRequestStats; + private final OperationStats totalResponseStats; + private final List perPipelineStats; + private final Map perPipelineProcessorStats; + + public SearchPipelineStats( + OperationStats totalRequestStats, + OperationStats totalResponseStats, + List perPipelineStats, + Map perPipelineProcessorStats + ) { + this.totalRequestStats = totalRequestStats; + this.totalResponseStats = totalResponseStats; + this.perPipelineStats = perPipelineStats; + this.perPipelineProcessorStats = perPipelineProcessorStats; + } + + public SearchPipelineStats(StreamInput in) throws IOException { + this.totalRequestStats = new OperationStats(in); + this.totalResponseStats = new OperationStats(in); + int size = in.readVInt(); + List perPipelineStats = new ArrayList<>(size); + Map pipelineDetailStatsMap = new TreeMap<>(); + for (int i = 0; i < size; i++) { + String pipelineId = in.readString(); + OperationStats pipelineRequestStats = new OperationStats(in); + OperationStats pipelineResponseStats = new OperationStats(in); + perPipelineStats.add(new PerPipelineStats(pipelineId, pipelineRequestStats, pipelineResponseStats)); + int numRequestProcessors = in.readVInt(); + List requestProcessorStats = new ArrayList<>(numRequestProcessors); + for (int j = 0; j < numRequestProcessors; j++) { + String processorName = in.readString(); + String processorType = in.readString(); + OperationStats processorStats = new OperationStats(in); + requestProcessorStats.add(new ProcessorStats(processorName, processorType, processorStats)); + } + int numResponseProcessors = in.readVInt(); + List responseProcessorStats = new ArrayList<>(numResponseProcessors); + for (int j = 0; j < numResponseProcessors; j++) { + String processorName = in.readString(); + String processorType = in.readString(); + OperationStats processorStats = new OperationStats(in); + responseProcessorStats.add(new ProcessorStats(processorName, processorType, processorStats)); + } + pipelineDetailStatsMap.put( + pipelineId, + new PipelineDetailStats(unmodifiableList(requestProcessorStats), unmodifiableList(responseProcessorStats)) + ); + } + this.perPipelineStats = unmodifiableList(perPipelineStats); + this.perPipelineProcessorStats = unmodifiableMap(pipelineDetailStatsMap); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("search_pipeline"); + builder.startObject("total_request"); + totalRequestStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("total_response"); + totalResponseStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("pipelines"); + for (PerPipelineStats pipelineStat : perPipelineStats) { + builder.startObject(pipelineStat.pipelineId); + builder.startObject("request"); + pipelineStat.requestStats.toXContent(builder, params); + builder.endObject(); + builder.startObject("response"); + pipelineStat.responseStats.toXContent(builder, params); + builder.endObject(); + + PipelineDetailStats pipelineDetailStats = perPipelineProcessorStats.get(pipelineStat.pipelineId); + builder.startArray("request_processors"); + for (ProcessorStats processorStats : pipelineDetailStats.requestProcessorStats) { + builder.startObject(); + processorStats.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + builder.startArray("response_processors"); + for 
(ProcessorStats processorStats : pipelineDetailStats.responseProcessorStats) { + builder.startObject(); + processorStats.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + builder.endObject(); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + totalRequestStats.writeTo(out); + totalResponseStats.writeTo(out); + out.writeVInt(perPipelineStats.size()); + for (PerPipelineStats pipelineStat : perPipelineStats) { + out.writeString(pipelineStat.pipelineId); + pipelineStat.requestStats.writeTo(out); + pipelineStat.responseStats.writeTo(out); + PipelineDetailStats pipelineDetailStats = perPipelineProcessorStats.get(pipelineStat.pipelineId); + out.writeVInt(pipelineDetailStats.requestProcessorStats.size()); + for (ProcessorStats processorStats : pipelineDetailStats.requestProcessorStats) { + out.writeString(processorStats.processorName); + out.writeString(processorStats.processorType); + processorStats.stats.writeTo(out); + } + out.writeVInt(pipelineDetailStats.responseProcessorStats.size()); + for (ProcessorStats processorStats : pipelineDetailStats.responseProcessorStats) { + out.writeString(processorStats.processorName); + out.writeString(processorStats.processorType); + processorStats.stats.writeTo(out); + } + } + } + + static class Builder { + private OperationStats totalRequestStats; + private OperationStats totalResponseStats; + private final List perPipelineStats = new ArrayList<>(); + private final Map> requestProcessorStatsPerPipeline = new HashMap<>(); + private final Map> responseProcessorStatsPerPipeline = new HashMap<>(); + + Builder withTotalStats(OperationMetrics totalRequestMetrics, OperationMetrics totalResponseMetrics) { + this.totalRequestStats = totalRequestMetrics.createStats(); + this.totalResponseStats = totalResponseMetrics.createStats(); + return this; + } + + Builder addPipelineStats(String pipelineId, OperationMetrics pipelineRequestMetrics, OperationMetrics pipelineResponseMetrics) { + this.perPipelineStats.add( + new PerPipelineStats(pipelineId, pipelineRequestMetrics.createStats(), pipelineResponseMetrics.createStats()) + ); + return this; + } + + Builder addRequestProcessorStats(String pipelineId, String processorName, String processorType, OperationMetrics processorMetrics) { + this.requestProcessorStatsPerPipeline.computeIfAbsent(pipelineId, k -> new ArrayList<>()) + .add(new ProcessorStats(processorName, processorType, processorMetrics.createStats())); + return this; + } + + Builder addResponseProcessorStats( + String pipelineId, + String processorName, + String processorType, + OperationMetrics processorMetrics + ) { + this.responseProcessorStatsPerPipeline.computeIfAbsent(pipelineId, k -> new ArrayList<>()) + .add(new ProcessorStats(processorName, processorType, processorMetrics.createStats())); + return this; + } + + SearchPipelineStats build() { + Map pipelineDetailStatsMap = new TreeMap<>(); + for (PerPipelineStats pipelineStat : perPipelineStats) { + List requestProcessorStats = requestProcessorStatsPerPipeline.getOrDefault( + pipelineStat.pipelineId, + emptyList() + ); + List responseProcessorStats = responseProcessorStatsPerPipeline.getOrDefault( + pipelineStat.pipelineId, + emptyList() + ); + PipelineDetailStats pipelineDetailStats = new PipelineDetailStats( + unmodifiableList(requestProcessorStats), + unmodifiableList(responseProcessorStats) + ); + pipelineDetailStatsMap.put(pipelineStat.pipelineId, pipelineDetailStats); + } + 
return new SearchPipelineStats( + totalRequestStats, + totalResponseStats, + unmodifiableList(perPipelineStats), + unmodifiableMap(pipelineDetailStatsMap) + ); + } + } + + static class PerPipelineStats { + private final String pipelineId; + private final OperationStats requestStats; + private final OperationStats responseStats; + + public PerPipelineStats(String pipelineId, OperationStats requestStats, OperationStats responseStats) { + this.pipelineId = pipelineId; + this.requestStats = requestStats; + this.responseStats = responseStats; + } + + public String getPipelineId() { + return pipelineId; + } + + public OperationStats getRequestStats() { + return requestStats; + } + + public OperationStats getResponseStats() { + return responseStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PerPipelineStats that = (PerPipelineStats) o; + return pipelineId.equals(that.pipelineId) && requestStats.equals(that.requestStats) && responseStats.equals(that.responseStats); + } + + @Override + public int hashCode() { + return Objects.hash(pipelineId, requestStats, responseStats); + } + } + + static class PipelineDetailStats { + private final List requestProcessorStats; + private final List responseProcessorStats; + + public PipelineDetailStats(List requestProcessorStats, List responseProcessorStats) { + this.requestProcessorStats = requestProcessorStats; + this.responseProcessorStats = responseProcessorStats; + } + + public List requestProcessorStats() { + return requestProcessorStats; + } + + public List responseProcessorStats() { + return responseProcessorStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PipelineDetailStats that = (PipelineDetailStats) o; + return requestProcessorStats.equals(that.requestProcessorStats) && responseProcessorStats.equals(that.responseProcessorStats); + } + + @Override + public int hashCode() { + return Objects.hash(requestProcessorStats, responseProcessorStats); + } + } + + static class ProcessorStats implements ToXContentFragment { + private final String processorName; // type:tag + private final String processorType; + private final OperationStats stats; + + public ProcessorStats(String processorName, String processorType, OperationStats stats) { + this.processorName = processorName; + this.processorType = processorType; + this.stats = stats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ProcessorStats that = (ProcessorStats) o; + return processorName.equals(that.processorName) && processorType.equals(that.processorType) && stats.equals(that.stats); + } + + @Override + public int hashCode() { + return Objects.hash(processorName, processorType, stats); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(processorName); + builder.field("type", processorType); + builder.startObject("stats"); + stats.toXContent(builder, params); + builder.endObject(); + builder.endObject(); + return builder; + } + + String getProcessorName() { + return processorName; + } + + String getProcessorType() { + return processorType; + } + + OperationStats getStats() { + return stats; + } + } + + OperationStats getTotalRequestStats() { + return totalRequestStats; + } + + OperationStats getTotalResponseStats() { 
+ return totalResponseStats; + } + + List getPipelineStats() { + return perPipelineStats; + } + + Map getPerPipelineProcessorStats() { + return perPipelineProcessorStats; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SearchPipelineStats stats = (SearchPipelineStats) o; + return totalRequestStats.equals(stats.totalRequestStats) + && totalResponseStats.equals(stats.totalResponseStats) + && perPipelineStats.equals(stats.perPipelineStats) + && perPipelineProcessorStats.equals(stats.perPipelineProcessorStats); + } + + @Override + public int hashCode() { + return Objects.hash(totalRequestStats, totalResponseStats, perPipelineStats, perPipelineProcessorStats); + } +} diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 3d4b04889a5c9..ebd0e59599c21 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -450,12 +450,25 @@ public ClusterState execute(ClusterState currentState) { final boolean isSearchableSnapshot = IndexModule.Type.REMOTE_SNAPSHOT.match( snapshotIndexMetadata.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey()) ); + final boolean isRemoteStoreShallowCopy = Boolean.TRUE.equals( + snapshotInfo.isRemoteStoreIndexShallowCopyEnabled() + ) && metadata.index(index).getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false); + if (isRemoteStoreShallowCopy && !currentState.getNodes().getMinNodeVersion().onOrAfter(Version.V_2_9_0)) { + throw new SnapshotRestoreException( + snapshot, + "cannot restore shallow copy snapshot for index [" + + index + + "] as some of the nodes in cluster have version less than 2.9" + ); + } final SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( restoreUUID, snapshot, snapshotInfo.version(), repositoryData.resolveIndexId(index), - isSearchableSnapshot + isSearchableSnapshot, + isRemoteStoreShallowCopy, + request.getSourceRemoteStoreRepository() ); final Version minIndexCompatibilityVersion; if (isSearchableSnapshot && isSearchableSnapshotsExtendedCompatibilityEnabled()) { diff --git a/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java new file mode 100644 index 0000000000000..ed111b34f048f --- /dev/null +++ b/server/src/main/java/org/opensearch/tasks/TaskThreadContextStatePropagator.java @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.tasks; + +import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.tasks.TaskResourceTrackingService.TASK_ID; + +/** + * Propagates TASK_ID across thread contexts + */ +public class TaskThreadContextStatePropagator implements ThreadContextStatePropagator { + @Override + public Map<String, Object> transients(Map<String, Object> source) { + final Map<String, Object> transients = new HashMap<>(); + + if (source.containsKey(TASK_ID)) { + transients.put(TASK_ID, source.get(TASK_ID)); + } + + return transients; + } + + @Override + public Map<String, String> headers(Map<String, Object> source) { + return Collections.emptyMap(); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java b/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java new file mode 100644 index 0000000000000..604c111b0720c --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/TelemetryModule.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry; + +import org.opensearch.plugins.TelemetryPlugin; + +import java.util.List; +import java.util.Optional; + +/** + * A module for loading classes for telemetry + * + * @opensearch.internal + */ +public class TelemetryModule { + + private Telemetry telemetry; + + public TelemetryModule(List<TelemetryPlugin> telemetryPlugins, TelemetrySettings telemetrySettings) { + + for (TelemetryPlugin telemetryPlugin : telemetryPlugins) { + Optional<Telemetry> telemetry = telemetryPlugin.getTelemetry(telemetrySettings); + if (telemetry.isPresent()) { + registerTelemetry(telemetry.get()); + } + } + } + + public Optional<Telemetry> getTelemetry() { + return Optional.ofNullable(telemetry); + } + + private void registerTelemetry(Telemetry factory) { + if (telemetry == null) { + telemetry = factory; + } else { + throw new IllegalArgumentException("Cannot register more than one telemetry"); + } + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java new file mode 100644 index 0000000000000..7c9e0d5ac8097 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/TelemetrySettings.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
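TaskThreadContextStatePropagator above implements the new ThreadContextStatePropagator hook: whenever ThreadContext captures state to hand work to another thread, each registered propagator picks which transients and headers travel along. A hedged registration sketch (where core actually registers this propagator is not shown in this diff):

```java
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.tasks.TaskThreadContextStatePropagator;

class PropagatorRegistrationDemo {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        // After registration, the TASK_ID transient is copied into contexts captured
        // for other threads, while headers are left untouched (headers() returns empty).
        threadContext.registerThreadContextStatePropagator(new TaskThreadContextStatePropagator());
    }
}
```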
+ */ + +package org.opensearch.telemetry; + +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; + +/** + * Wrapper class to encapsulate tracing related settings + */ +public class TelemetrySettings { + public static final Setting TRACER_ENABLED_SETTING = Setting.boolSetting( + "telemetry.tracer.enabled", + false, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + + private volatile boolean tracingEnabled; + + public TelemetrySettings(Settings settings, ClusterSettings clusterSettings) { + this.tracingEnabled = TRACER_ENABLED_SETTING.get(settings); + + clusterSettings.addSettingsUpdateConsumer(TRACER_ENABLED_SETTING, this::setTracingEnabled); + } + + public void setTracingEnabled(boolean tracingEnabled) { + this.tracingEnabled = tracingEnabled; + } + + public boolean isTracingEnabled() { + return tracingEnabled; + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/package-info.java b/server/src/main/java/org/opensearch/telemetry/package-info.java new file mode 100644 index 0000000000000..4545f0ef5990e --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for telemetry. + */ +package org.opensearch.telemetry; diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java new file mode 100644 index 0000000000000..3d7f8133788ce --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/NoopTracerFactory.java @@ -0,0 +1,33 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.util.Optional; + +/** + * No-op implementation of TracerFactory + */ +public class NoopTracerFactory extends TracerFactory { + public NoopTracerFactory() { + super(null, Optional.empty(), null); + } + + @Override + public Tracer getTracer() { + return NoopTracer.INSTANCE; + } + + @Override + public void close() { + + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java new file mode 100644 index 0000000000000..0d0b795fdc715 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/ThreadContextBasedTracerContextStorage.java @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
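TRACER_ENABLED_SETTING above is declared Dynamic, so tracing can be flipped at runtime once the setting is registered with cluster settings. A hedged construction sketch; in a real node the setting registration happens in core, not at the call site:

```java
import java.util.Set;

import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.telemetry.TelemetrySettings;

class TelemetrySettingsDemo {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.EMPTY; // default: telemetry.tracer.enabled = false
        ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, Set.of(TelemetrySettings.TRACER_ENABLED_SETTING));
        TelemetrySettings telemetrySettings = new TelemetrySettings(nodeSettings, clusterSettings);
        // A dynamic cluster-settings update to telemetry.tracer.enabled now flips
        // isTracingEnabled() through the consumer registered in the constructor.
        System.out.println(telemetrySettings.isTracingEnabled());
    }
}
```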
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.util.concurrent.ThreadContextStatePropagator; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +/** + * Core's ThreadContext based TracerContextStorage implementation + */ +public class ThreadContextBasedTracerContextStorage implements TracerContextStorage, ThreadContextStatePropagator { + + private final ThreadContext threadContext; + + private final TracingTelemetry tracingTelemetry; + + public ThreadContextBasedTracerContextStorage(ThreadContext threadContext, TracingTelemetry tracingTelemetry) { + this.threadContext = Objects.requireNonNull(threadContext); + this.tracingTelemetry = Objects.requireNonNull(tracingTelemetry); + this.threadContext.registerThreadContextStatePropagator(this); + } + + @Override + public Span get(String key) { + return getCurrentSpan(key); + } + + @Override + public void put(String key, Span span) { + if (span == null) { + return; + } + SpanReference currentSpanRef = threadContext.getTransient(key); + if (currentSpanRef == null) { + threadContext.putTransient(key, new SpanReference(span)); + } else { + currentSpanRef.setSpan(span); + } + } + + @Override + public Map transients(Map source) { + final Map transients = new HashMap<>(); + + if (source.containsKey(CURRENT_SPAN)) { + final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); + if (current != null) { + transients.put(CURRENT_SPAN, new SpanReference(current.getSpan())); + } + } + + return transients; + } + + @Override + public Map headers(Map source) { + final Map headers = new HashMap<>(); + + if (source.containsKey(CURRENT_SPAN)) { + final SpanReference current = (SpanReference) source.get(CURRENT_SPAN); + if (current != null) { + tracingTelemetry.getContextPropagator().inject(current.getSpan(), (key, value) -> headers.put(key, value)); + } + } + + return headers; + } + + Span getCurrentSpan(String key) { + Optional optionalSpanFromContext = spanFromThreadContext(key); + return optionalSpanFromContext.orElse(spanFromHeader()); + } + + private Optional spanFromThreadContext(String key) { + SpanReference currentSpanRef = threadContext.getTransient(key); + return (currentSpanRef == null) ? Optional.empty() : Optional.ofNullable(currentSpanRef.getSpan()); + } + + private Span spanFromHeader() { + return tracingTelemetry.getContextPropagator().extract(threadContext.getHeaders()); + } +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java new file mode 100644 index 0000000000000..8228cded4c822 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/TracerFactory.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
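Note how put() above mutates the existing SpanReference instead of overwriting the transient: ThreadContext transients are effectively write-once per key, so a mutable holder lets the current span change while previously captured contexts keep observing the same reference. The holder amounts to this (a sketch; SpanReference itself is not shown in this diff):

```java
// Minimal mutable holder, standing in for the SpanReference used above.
final class SpanRef<T> {
    private T value;

    SpanRef(T value) {
        this.value = value;
    }

    T get() {
        return value;
    }

    void set(T value) {
        this.value = value;
    }
}
```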
+ */ + +package org.opensearch.telemetry.tracing; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Optional; + +/** + * TracerFactory represents a single global class that is used to access tracers. + * + * The Tracer singleton object can be retrieved using tracerFactory.getTracer(). The TracerFactory object + * is created during node construction and cannot subsequently be changed. + */ +public class TracerFactory implements Closeable { + + private static final Logger logger = LogManager.getLogger(TracerFactory.class); + + private final TelemetrySettings telemetrySettings; + private final Tracer defaultTracer; + + public TracerFactory(TelemetrySettings telemetrySettings, Optional<Telemetry> telemetry, ThreadContext threadContext) { + this.telemetrySettings = telemetrySettings; + this.defaultTracer = telemetry.map(Telemetry::getTracingTelemetry) + .map(tracingTelemetry -> createDefaultTracer(tracingTelemetry, threadContext)) + .orElse(NoopTracer.INSTANCE); + } + + /** + * Returns the tracer instance + * @return tracer instance + */ + public Tracer getTracer() { + return telemetrySettings.isTracingEnabled() ? defaultTracer : NoopTracer.INSTANCE; + } + + /** + * Closes the {@link Tracer} + */ + @Override + public void close() { + try { + defaultTracer.close(); + } catch (IOException e) { + logger.warn("Error closing tracer", e); + } + } + + private Tracer createDefaultTracer(TracingTelemetry tracingTelemetry, ThreadContext threadContext) { + TracerContextStorage<String, Span> tracerContextStorage = new ThreadContextBasedTracerContextStorage( + threadContext, + tracingTelemetry + ); + return new DefaultTracer(tracingTelemetry, tracerContextStorage); + } + +} diff --git a/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java b/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java new file mode 100644 index 0000000000000..4ac1e4c212c81 --- /dev/null +++ b/server/src/main/java/org/opensearch/telemetry/tracing/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes needed for tracing requests.
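Because Node falls back to NoopTracerFactory whenever the TELEMETRY feature flag is off or no TelemetryPlugin is installed, call sites can fetch a tracer unconditionally and never need a null check:

```java
import org.opensearch.telemetry.tracing.NoopTracerFactory;
import org.opensearch.telemetry.tracing.Tracer;
import org.opensearch.telemetry.tracing.TracerFactory;

class TracerFallbackDemo {
    public static void main(String[] args) {
        TracerFactory factory = new NoopTracerFactory();
        Tracer tracer = factory.getTracer(); // always NoopTracer.INSTANCE here
        System.out.println(tracer);
    }
}
```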
+ */
+package org.opensearch.telemetry.tracing;
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
index d99b93b780140..1e2e085333e50 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -37,6 +37,7 @@
 import org.opensearch.cluster.service.ClusterManagerThrottlingStats;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.discovery.DiscoveryStats;
 import org.opensearch.cluster.coordination.PendingClusterStateStats;
 import org.opensearch.cluster.coordination.PublishClusterStateStats;
@@ -338,40 +339,31 @@ public void testSerialization() throws IOException {
         if (ingestStats == null) {
             assertNull(deserializedIngestStats);
         } else {
-            IngestStats.Stats totalStats = ingestStats.getTotalStats();
-            assertEquals(totalStats.getIngestCount(), deserializedIngestStats.getTotalStats().getIngestCount());
-            assertEquals(totalStats.getIngestCurrent(), deserializedIngestStats.getTotalStats().getIngestCurrent());
-            assertEquals(totalStats.getIngestFailedCount(), deserializedIngestStats.getTotalStats().getIngestFailedCount());
-            assertEquals(totalStats.getIngestTimeInMillis(), deserializedIngestStats.getTotalStats().getIngestTimeInMillis());
+            OperationStats totalStats = ingestStats.getTotalStats();
+            assertEquals(totalStats.getCount(), deserializedIngestStats.getTotalStats().getCount());
+            assertEquals(totalStats.getCurrent(), deserializedIngestStats.getTotalStats().getCurrent());
+            assertEquals(totalStats.getFailedCount(), deserializedIngestStats.getTotalStats().getFailedCount());
+            assertEquals(totalStats.getTotalTimeInMillis(), deserializedIngestStats.getTotalStats().getTotalTimeInMillis());
             assertEquals(ingestStats.getPipelineStats().size(), deserializedIngestStats.getPipelineStats().size());
             for (IngestStats.PipelineStat pipelineStat : ingestStats.getPipelineStats()) {
                 String pipelineId = pipelineStat.getPipelineId();
-                IngestStats.Stats deserializedPipelineStats = getPipelineStats(
-                    deserializedIngestStats.getPipelineStats(),
-                    pipelineId
-                );
-                assertEquals(pipelineStat.getStats().getIngestFailedCount(), deserializedPipelineStats.getIngestFailedCount());
-                assertEquals(pipelineStat.getStats().getIngestTimeInMillis(), deserializedPipelineStats.getIngestTimeInMillis());
-                assertEquals(pipelineStat.getStats().getIngestCurrent(), deserializedPipelineStats.getIngestCurrent());
-                assertEquals(pipelineStat.getStats().getIngestCount(), deserializedPipelineStats.getIngestCount());
+                OperationStats deserializedPipelineStats = getPipelineStats(deserializedIngestStats.getPipelineStats(), pipelineId);
+                assertEquals(pipelineStat.getStats().getFailedCount(), deserializedPipelineStats.getFailedCount());
+                assertEquals(pipelineStat.getStats().getTotalTimeInMillis(), deserializedPipelineStats.getTotalTimeInMillis());
+                assertEquals(pipelineStat.getStats().getCurrent(), deserializedPipelineStats.getCurrent());
+                assertEquals(pipelineStat.getStats().getCount(), deserializedPipelineStats.getCount());
                 List<IngestStats.ProcessorStat> processorStats = ingestStats.getProcessorStats().get(pipelineId);
                 // intentionally validating identical order
                 Iterator<IngestStats.ProcessorStat> it = deserializedIngestStats.getProcessorStats().get(pipelineId).iterator();
                 for (IngestStats.ProcessorStat processorStat : processorStats) {
                     IngestStats.ProcessorStat deserializedProcessorStat = it.next();
+                    assertEquals(processorStat.getStats().getFailedCount(), deserializedProcessorStat.getStats().getFailedCount());
                     assertEquals(
-                        processorStat.getStats().getIngestFailedCount(),
-                        deserializedProcessorStat.getStats().getIngestFailedCount()
-                    );
-                    assertEquals(
-                        processorStat.getStats().getIngestTimeInMillis(),
-                        deserializedProcessorStat.getStats().getIngestTimeInMillis()
+                        processorStat.getStats().getTotalTimeInMillis(),
+                        deserializedProcessorStat.getStats().getTotalTimeInMillis()
                     );
-                    assertEquals(
-                        processorStat.getStats().getIngestCurrent(),
-                        deserializedProcessorStat.getStats().getIngestCurrent()
-                    );
-                    assertEquals(processorStat.getStats().getIngestCount(), deserializedProcessorStat.getStats().getIngestCount());
+                    assertEquals(processorStat.getStats().getCurrent(), deserializedProcessorStat.getStats().getCurrent());
+                    assertEquals(processorStat.getStats().getCount(), deserializedProcessorStat.getStats().getCount());
                 }
                 assertFalse(it.hasNext());
             }
@@ -650,7 +642,7 @@ public static NodeStats createNodeStats() {
             : null;
         IngestStats ingestStats = null;
         if (frequently()) {
-            IngestStats.Stats totalStats = new IngestStats.Stats(
+            OperationStats totalStats = new OperationStats(
                 randomNonNegativeLong(),
                 randomNonNegativeLong(),
                 randomNonNegativeLong(),
@@ -665,7 +657,7 @@
                 ingestPipelineStats.add(
                     new IngestStats.PipelineStat(
                         pipelineId,
-                        new IngestStats.Stats(
+                        new OperationStats(
                             randomNonNegativeLong(),
                             randomNonNegativeLong(),
                             randomNonNegativeLong(),
@@ -676,7 +668,7 @@
                 List<IngestStats.ProcessorStat> processorPerPipeline = new ArrayList<>(numProcessors);
                 for (int j = 0; j < numProcessors; j++) {
-                    IngestStats.Stats processorStats = new IngestStats.Stats(
+                    OperationStats processorStats = new OperationStats(
                         randomNonNegativeLong(),
                         randomNonNegativeLong(),
                         randomNonNegativeLong(),
@@ -750,11 +742,12 @@ public static NodeStats createNodeStats() {
             clusterManagerThrottlingStats,
             weightedRoutingStats,
             null,
+            null,
             null
         );
     }

-    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
+    private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
         return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
     }
 }
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
index 737e7b2e4887b..bb55ac810ed09 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java
@@ -112,6 +112,10 @@ private RestoreSnapshotRequest randomState(RestoreSnapshotRequest instance) {
             instance.snapshotUuid(randomBoolean() ? null : randomAlphaOfLength(10));
         }
 
+        if (randomBoolean()) {
+            instance.setSourceRemoteStoreRepository(randomAlphaOfLengthBetween(5, 10));
+        }
+
         return instance;
     }
 
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
index d3a40868bc389..627ada7092273 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodesTests.java
@@ -89,15 +89,15 @@ public void testIngestStats() throws Exception {
             processorStats.compute(stat.getType(), (key, value) -> {
                 if (value == null) {
                     return new long[] {
-                        stat.getStats().getIngestCount(),
-                        stat.getStats().getIngestFailedCount(),
-                        stat.getStats().getIngestCurrent(),
-                        stat.getStats().getIngestTimeInMillis() };
+                        stat.getStats().getCount(),
+                        stat.getStats().getFailedCount(),
+                        stat.getStats().getCurrent(),
+                        stat.getStats().getTotalTimeInMillis() };
                 } else {
-                    value[0] += stat.getStats().getIngestCount();
-                    value[1] += stat.getStats().getIngestFailedCount();
-                    value[2] += stat.getStats().getIngestCurrent();
-                    value[3] += stat.getStats().getIngestTimeInMillis();
+                    value[0] += stat.getStats().getCount();
+                    value[1] += stat.getStats().getFailedCount();
+                    value[2] += stat.getStats().getCurrent();
+                    value[3] += stat.getStats().getTotalTimeInMillis();
                     return value;
                 }
             });
diff --git a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
index 85aa60d6f308b..57a41f08f73f2 100644
--- a/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/indices/cache/clear/TransportClearIndicesCacheActionTests.java
@@ -85,7 +85,7 @@ public void testOnShardOperation() throws IOException {
         when(shardRouting.shardId()).thenReturn(shardId);
         final ShardPath shardPath = ShardPath.loadFileCachePath(nodeEnvironment, shardId);
         final Path cacheEntryPath = shardPath.getDataPath();
-        final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(1024 * 1024 * 1024, 16, new NoopCircuitBreaker(""));
+        final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(1024 * 1024, 16, new NoopCircuitBreaker(""));
         when(testNode.fileCache()).thenReturn(fileCache);
         when(testNode.getNodeEnvironment()).thenReturn(nodeEnvironment);
diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
index 73349d45bd5c7..e5833ea619774 100644
--- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
+++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
@@ -189,6 +189,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -214,6 +215,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -239,6 +241,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             )
         );
@@ -295,6 +298,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -320,6 +324,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -345,6 +350,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             )
         );
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java b/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java
index e4aae52f41e68..a5c006362a20c 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RecoverySourceTests.java
@@ -36,6 +36,8 @@
 import org.opensearch.common.UUIDs;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.repositories.IndexId;
+import org.opensearch.snapshots.Snapshot;
+import org.opensearch.snapshots.SnapshotId;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
@@ -54,6 +56,28 @@ public void testSerialization() throws IOException {
         assertEquals(recoverySource, serializedRecoverySource);
     }
 
+    public void testSerializationSnapshotRecoverySource() throws IOException {
+        boolean isSearchableSnapshot = randomBoolean();
+        boolean isRemoteStoreShallowCopyEnabled = randomBoolean();
+        String sourceRemoteStoreRepo = "test-remote-repo";
+        RecoverySource.SnapshotRecoverySource recoverySource = new RecoverySource.SnapshotRecoverySource(
+            UUIDs.randomBase64UUID(),
+            new Snapshot("repo", new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())),
+            Version.CURRENT,
+            new IndexId("some_index", UUIDs.randomBase64UUID(random())),
+            isSearchableSnapshot,
+            isRemoteStoreShallowCopyEnabled,
+            sourceRemoteStoreRepo
+        );
+        BytesStreamOutput out = new BytesStreamOutput();
+        recoverySource.writeTo(out);
+        RecoverySource serializedRecoverySource = RecoverySource.readFrom(out.bytes().streamInput());
+        assertEquals(recoverySource.getType(), serializedRecoverySource.getType());
+        assertEquals(recoverySource, serializedRecoverySource);
+        assertEquals(recoverySource.remoteStoreIndexShallowCopy(), isRemoteStoreShallowCopyEnabled);
+        assertEquals(recoverySource.isSearchableSnapshot(), isSearchableSnapshot);
+    }
+
     public void testRecoverySourceTypeOrder() {
         assertEquals(RecoverySource.Type.EMPTY_STORE.ordinal(), 0);
         assertEquals(RecoverySource.Type.EXISTING_STORE.ordinal(), 1);
diff --git a/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java b/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java
new file mode 100644
index 0000000000000..259eab6c624bd
--- /dev/null
+++ b/server/src/test/java/org/opensearch/common/util/ReorganizingLongHashTests.java
@@ -0,0 +1,146 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.util;
+
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.util.HashMap;
+import java.util.Map;
+
+public class ReorganizingLongHashTests extends OpenSearchTestCase {
+
+    public void testFuzzy() {
+        Map<Long, Long> reference = new HashMap<>();
+
+        try (
+            ReorganizingLongHash h = new ReorganizingLongHash(
+                randomIntBetween(1, 100), // random capacity
+                0.6f + randomFloat() * 0.39f, // random load factor to verify collision resolution
+                BigArrays.NON_RECYCLING_INSTANCE
+            )
+        ) {
+            // Verify the behaviour of "add" and "find".
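+            // add() returns the ordinal assigned to a new key, or (-1 - ordinal) when the key is
+            // already present; find() returns the ordinal, or -1 when the key is absent. A plain
+            // HashMap mirrors the expected key-to-ordinal mapping.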
+            for (int i = 0; i < (1 << 20); i++) {
+                long key = randomLong() % (1 << 12); // roughly ~4% unique keys
+                if (reference.containsKey(key)) {
+                    long expectedOrdinal = reference.get(key);
+                    assertEquals(-1 - expectedOrdinal, h.add(key));
+                    assertEquals(expectedOrdinal, h.find(key));
+                } else {
+                    assertEquals(-1, h.find(key));
+                    reference.put(key, (long) reference.size());
+                    assertEquals((long) reference.get(key), h.add(key));
+                }
+            }
+
+            // Verify the behaviour of "get".
+            for (Map.Entry<Long, Long> entry : reference.entrySet()) {
+                assertEquals((long) entry.getKey(), h.get(entry.getValue()));
+            }
+
+            // Verify the behaviour of "size".
+            assertEquals(reference.size(), h.size());
+
+            // Verify the calculation of PSLs.
+            final long capacity = h.getTable().size();
+            final long mask = capacity - 1;
+            for (long idx = 0; idx < h.getTable().size(); idx++) {
+                final long value = h.getTable().get(idx);
+                if (value != -1) {
+                    final long homeIdx = h.hash(h.get((int) value)) & mask;
+                    assertEquals((capacity + idx - homeIdx) & mask, value >>> 48);
+                }
+            }
+        }
+    }
+
+    public void testRearrangement() {
+        try (ReorganizingLongHash h = new ReorganizingLongHash(4, 0.6f, BigArrays.NON_RECYCLING_INSTANCE) {
+            /**
+             * Overriding with an "identity" hash function to make it easier to reason about the placement
+             * of values in the hash table. The backing array of the hash table will have a size (8),
+             * i.e. nextPowerOfTwo(initialCapacity/loadFactor), so the bitmask will be (7).
+             * The ideal home slot of a key can then be defined as: (hash(key) & mask) = (key & 7).
+             */
+            @Override
+            long hash(long key) {
+                return key;
+            }
+        }) {
+            /*
+             * Add key=0, hash=0, home_slot=0
+             *
+             * Before: empty slot.
+             *   ▼
+             * [ _ _ _ _ _ _ _ _ ]
+             *
+             * After: inserted [ordinal=0, psl=0] at the empty slot.
+             * [ 0 _ _ _ _ _ _ _ ]
+             */
+            h.add(0);
+            assertEquals(encodeValue(0, 0, 0), h.getTable().get(0));
+
+            /*
+             * Add key=8, hash=8, home_slot=0
+             *
+             * Before: occupied slot.
+             *   ▼
+             * [ 0 _ _ _ _ _ _ _ ]
+             *
+             * After: inserted [ordinal=1, psl=0] at the existing slot, displaced [ordinal=0, psl=0],
+             * and re-inserted it at the next empty slot as [ordinal=0, psl=1].
+             * [ 1 0 _ _ _ _ _ _ ]
+             */
+            h.add(8);
+            assertEquals(encodeValue(0, 0, 1), h.getTable().get(0));
+            assertEquals(encodeValue(1, 0, 0), h.getTable().get(1));
+
+            /*
+             * Add key=1, hash=1, home_slot=1
+             *
+             * Before: occupied slot.
+             *     ▼
+             * [ 1 0 _ _ _ _ _ _ ]
+             *
+             * After: inserted [ordinal=2, psl=0] at the existing slot, displaced [ordinal=0, psl=1],
+             * and re-inserted it at the next empty slot as [ordinal=0, psl=2].
+             * [ 1 2 0 _ _ _ _ _ ]
+             */
+            h.add(1);
+            assertEquals(encodeValue(0, 0, 1), h.getTable().get(0));
+            assertEquals(encodeValue(0, 0, 2), h.getTable().get(1));
+            assertEquals(encodeValue(2, 0, 0), h.getTable().get(2));
+
+            /*
+             * Add key=16, hash=16, home_slot=0
+             *
+             * Before: occupied slot.
+             *   ▼
+             * [ 1 2 0 _ _ _ _ _ ]
+             *
+             * After: inserted [ordinal=3, psl=0] at the existing slot, displaced [ordinal=1, psl=0]
+             * and re-inserted it at the next best slot. Repeated this for other displaced values
+             * until everything found an empty slot.
+             * [ 3 1 0 2 _ _ _ _ ]
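+             * (Each new key claims its home slot; displaced entries keep shifting until they find
+             * a free slot, which is what keeps probe-sequence lengths short.)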
+             */
+            h.add(16);
+            assertEquals(encodeValue(0, 0, 3), h.getTable().get(0));
+            assertEquals(encodeValue(1, 0, 1), h.getTable().get(1));
+            assertEquals(encodeValue(2, 0, 0), h.getTable().get(2));
+            assertEquals(encodeValue(2, 0, 2), h.getTable().get(3));
+        }
+    }
+
+    private static long encodeValue(long psl, long fingerprint, long ordinal) {
+        assert psl < (1L << 15);
+        assert fingerprint < (1L << 16);
+        assert ordinal < (1L << 32);
+        return (psl << 48) | (fingerprint << 32) | ordinal;
+    }
+}
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index 1c0ebf17285f7..58527dbea5791 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -37,6 +37,7 @@
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -49,6 +50,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.Constants;
 import org.junit.Assert;
+import org.opensearch.common.io.PathUtils;
 import org.opensearch.core.Assertions;
 import org.opensearch.OpenSearchException;
 import org.opensearch.Version;
@@ -127,6 +129,7 @@
 import org.opensearch.index.seqno.RetentionLeases;
 import org.opensearch.index.seqno.SeqNoStats;
 import org.opensearch.index.seqno.SequenceNumbers;
+import org.opensearch.index.store.RemoteSegmentStoreDirectory;
 import org.opensearch.index.store.Store;
 import org.opensearch.index.store.StoreStats;
 import org.opensearch.index.store.StoreUtils;
@@ -188,7 +191,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
-
+import java.util.Collection;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.hamcrest.Matchers.containsInAnyOrder;
@@ -2797,6 +2800,78 @@ public void restoreShard(
         closeShards(target);
     }
 
+    public void testSyncSegmentsFromGivenRemoteSegmentStore() throws IOException {
+        String remoteStorePath = createTempDir().toString();
+        IndexShard source = newStartedShard(
+            true,
+            Settings.builder()
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStorePath + "__test")
+                .build(),
+            new InternalEngineFactory()
+        );
+        indexDoc(source, "_doc", "1");
+        indexDoc(source, "_doc", "2");
+        source.refresh("test");
+        assertDocs(source, "1", "2");
+        indexDoc(source, "_doc", "3");
+        source.refresh("test");
+        flushShard(source);
+
+        indexDoc(source, "_doc", "5");
+        source.refresh("test");
+
+        indexDoc(source, "_doc", "4");
+        source.refresh("test");
+
+        long primaryTerm;
+        long commitGeneration;
+        try (GatedCloseable<SegmentInfos> segmentInfosGatedCloseable = source.getSegmentInfosSnapshot()) {
+            SegmentInfos segmentInfos = segmentInfosGatedCloseable.get();
+            primaryTerm = source.getOperationPrimaryTerm();
+            commitGeneration = segmentInfos.getGeneration();
+        }
+        Collection<String> lastCommitedSegmentsInSource = SegmentInfos.readLatestCommit(source.store().directory()).files(false);
+
+        closeShards(source);
+
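+        // Re-open the uploaded segments through a fresh RemoteSegmentStoreDirectory so they can
+        // be synced into a brand-new shard below.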
+        RemoteSegmentStoreDirectory tempRemoteSegmentDirectory = createRemoteSegmentStoreDirectory(
+            source.shardId(),
+            PathUtils.get(remoteStorePath)
+        );
+
+        IndexShard target = newStartedShard(
+            true,
+            Settings.builder()
+                .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
+                .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
+                .build(),
+            new InternalEngineFactory()
+        );
+        ShardRouting routing = ShardRoutingHelper.initWithSameId(
+            target.routingEntry(),
+            RecoverySource.ExistingStoreRecoverySource.INSTANCE
+        );
+        routing = ShardRoutingHelper.newWithRestoreSource(routing, new RecoverySource.EmptyStoreRecoverySource());
+
+        target = reinitShard(target, routing);
+
+        target.syncSegmentsFromGivenRemoteSegmentStore(false, tempRemoteSegmentDirectory, primaryTerm, commitGeneration);
+        RemoteSegmentStoreDirectory remoteStoreDirectory = ((RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) target
+            .remoteStore()
+            .directory()).getDelegate()).getDelegate());
+        Collection<String> uploadFiles = remoteStoreDirectory.getSegmentsUploadedToRemoteStore().keySet();
+        assertTrue(uploadFiles.containsAll(lastCommitedSegmentsInSource));
+        assertTrue(
+            "Failed to sync all files to new shard",
+            List.of(target.store().directory().listAll()).containsAll(lastCommitedSegmentsInSource)
+        );
+        Directory storeDirectory = ((FilterDirectory) ((FilterDirectory) target.store().directory()).getDelegate()).getDelegate();
+        ((BaseDirectoryWrapper) storeDirectory).setCheckIndexOnClose(false);
+        closeShards(target);
+    }
+
     public void testRefreshLevelRestoreShardFromRemoteStore() throws IOException {
         testRestoreShardFromRemoteStore(false);
     }
diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
index fec9b04d6e371..3417e7b0aee04 100644
--- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
+++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java
@@ -228,7 +228,8 @@ private Map<String, String> getDummyMetadata(String prefix, int commitGeneration
      * @return ByteArrayIndexInput: metadata file bytes with header and footer
      * @throws IOException IOException
      */
-    private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentFilesMap, long generation) throws IOException {
+    private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentFilesMap, long generation, long primaryTerm)
+        throws IOException {
         ByteBuffersDataOutput byteBuffersIndexOutput = new ByteBuffersDataOutput();
         segmentInfos.write(new ByteBuffersIndexOutput(byteBuffersIndexOutput, "", ""));
         byte[] byteArray = byteBuffersIndexOutput.toArrayCopy();
@@ -238,6 +239,7 @@ private ByteArrayIndexInput createMetadataFileBytes(Map<String, String> segmentF
         CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, RemoteSegmentMetadata.CURRENT_VERSION);
         indexOutput.writeMapOfStrings(segmentFilesMap);
         indexOutput.writeLong(generation);
+        indexOutput.writeLong(primaryTerm);
         indexOutput.writeLong(byteArray.length);
         indexOutput.writeBytes(byteArray, byteArray.length);
         CodecUtil.writeFooter(indexOutput);
@@ -261,14 +263,14 @@ private Map<String, Map<String, String>> populateMetadata() throws IOException {
         );
 
         when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(
-            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1)
+            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5)
         );
         when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn(
-            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__6__pqr"), 1)
+            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__6__pqr"), 1, 6)
         );
         when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn(
-            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1),
-            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1)
+            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2),
+            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2)
         );
 
         return metadataFilenameContentMapping;
@@ -503,7 +505,7 @@ public void testGetSegmentsUploadedToRemoteStore() throws IOException {
         );
 
         when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(
-            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1)
+            createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5)
        );
 
         assert (remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore(testPrimaryTerm, testGeneration).containsKey("segments_5"));
@@ -577,7 +579,9 @@ public void testContainsFile() throws IOException {
         metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512");
         metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024");
 
-        when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata, 1));
+        when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(
+            createMetadataFileBytes(metadata, 1, 5)
+        );
 
         remoteSegmentStoreDirectory.init();
diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java
index 3924cd4ed1913..5bdba0513af23 100644
--- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java
+++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheCleanerTests.java
@@ -48,7 +48,7 @@ public class FileCacheCleanerTests extends OpenSearchTestCase {
     );
 
     private final FileCache fileCache = FileCacheFactory.createConcurrentLRUFileCache(
-        1024 * 1024 * 1024,
+        1024 * 1024,
         1,
         new NoopCircuitBreaker(CircuitBreaker.REQUEST)
     );
diff --git a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java
index 43a5c04b59f83..02b6a48b6f48e 100644
--- a/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java
+++ b/server/src/test/java/org/opensearch/index/store/remote/filecache/FileCacheTests.java
@@ -31,7 +31,6 @@ public class FileCacheTests extends OpenSearchTestCase {
     // but fatal to these tests
     private final static int CONCURRENCY_LEVEL = 16;
     private final static int MEGA_BYTES = 1024 * 1024;
-    private final static int GIGA_BYTES = 1024 * 1024 * 1024;
     private final static String FAKE_PATH_SUFFIX = "Suffix";
     private Path path;
 
@@ -66,15 +65,9 @@ private void createFile(String indexName, String shardId, String fileName) throw
         Files.write(filePath, "test-data".getBytes());
     }
 
-    public void testCreateCacheWithSmallSegments() {
-        assertThrows(IllegalStateException.class, () -> {
-            FileCacheFactory.createConcurrentLRUFileCache(1000, CONCURRENCY_LEVEL, new NoopCircuitBreaker(CircuitBreaker.REQUEST));
-        });
-    }
-
     // test get method
     public void testGet() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(8 * MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES));
         }
@@ -86,27 +79,27 @@ public void testGet() {
 
     public void testGetThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.get(null);
         });
     }
 
     public void testPutThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.put(null, null);
         });
     }
 
     public void testPutThrowCircuitBreakingException() {
-        FileCache fileCache = createCircuitBreakingFileCache(GIGA_BYTES);
+        FileCache fileCache = createCircuitBreakingFileCache(MEGA_BYTES);
         Path path = createPath("0");
         assertThrows(CircuitBreakingException.class, () -> fileCache.put(path, new StubCachedIndexInput(8 * MEGA_BYTES)));
         assertNull(fileCache.get(path));
     }
 
     public void testCompute() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         Path path = createPath("0");
         fileCache.put(path, new StubCachedIndexInput(8 * MEGA_BYTES));
         fileCache.incRef(path);
@@ -117,20 +110,20 @@ public void testCompute() {
 
     public void testComputeThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.compute(null, null);
         });
     }
 
     public void testComputeThrowCircuitBreakingException() {
-        FileCache fileCache = createCircuitBreakingFileCache(GIGA_BYTES);
+        FileCache fileCache = createCircuitBreakingFileCache(MEGA_BYTES);
         Path path = createPath("0");
         assertThrows(CircuitBreakingException.class, () -> fileCache.compute(path, (p, i) -> new StubCachedIndexInput(8 * MEGA_BYTES)));
         assertNull(fileCache.get(path));
     }
 
     public void testRemove() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES));
         }
@@ -145,13 +138,13 @@ public void testRemove() {
 
     public void testRemoveThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.remove(null);
         });
     }
 
     public void testIncDecRef() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES));
         }
@@ -184,7 +177,7 @@ public void testIncDecRef() {
 
     public void testIncRefThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.incRef(null);
         });
 
@@ -192,19 +185,19 @@ public void testIncRefThrowException() {
 
     public void testDecRefThrowException() {
         assertThrows(NullPointerException.class, () -> {
-            FileCache fileCache = createFileCache(GIGA_BYTES);
+            FileCache fileCache = createFileCache(MEGA_BYTES);
             fileCache.decRef(null);
         });
     }
 
     public void testCapacity() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
-        assertEquals(fileCache.capacity(), GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
+        assertEquals(fileCache.capacity(), MEGA_BYTES);
     }
 
     public void testSize() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES));
         }
@@ -213,34 +206,34 @@ public void testSize() {
     }
 
     public void testPrune() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             putAndDecRef(fileCache, i, 8 * MEGA_BYTES);
         }
         // before prune
-        assertEquals(fileCache.size(), 4);
+        assertTrue(fileCache.size() >= 1);
 
         fileCache.prune();
         // after prune
-        assertEquals(fileCache.size(), 0);
+        assertEquals(0, fileCache.size());
     }
 
     public void testPruneWithPredicate() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             putAndDecRef(fileCache, i, 8 * MEGA_BYTES);
         }
 
         // before prune
-        assertEquals(fileCache.size(), 4);
+        assertTrue(fileCache.size() >= 1);
 
         // after prune with false predicate
         fileCache.prune(path -> false);
-        assertEquals(fileCache.size(), 4);
+        assertTrue(fileCache.size() >= 1);
 
         // after prune with true predicate
         fileCache.prune(path -> true);
-        assertEquals(fileCache.size(), 0);
+        assertEquals(0, fileCache.size());
     }
 
     public void testUsage() {
@@ -258,7 +251,7 @@ public void testUsage() {
     }
 
     public void testStats() {
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         for (int i = 0; i < 4; i++) {
             fileCache.put(createPath(Integer.toString(i)), new StubCachedIndexInput(8 * MEGA_BYTES));
         }
@@ -284,7 +277,7 @@ public void testCacheRestore() throws IOException {
         String indexName = "test-index";
         String shardId = "0";
         createFile(indexName, shardId, "test.0");
-        FileCache fileCache = createFileCache(GIGA_BYTES);
+        FileCache fileCache = createFileCache(MEGA_BYTES);
         assertEquals(0, fileCache.usage().usage());
         Path fileCachePath = path.resolve(NodeEnvironment.CACHE_FOLDER).resolve(indexName).resolve(shardId);
         fileCache.restoreFromDirectory(List.of(fileCachePath));
diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java
index 3bf7781fb909f..cc0764a6700b1 100644
--- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java
+++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java
@@ -61,6 +61,7 @@ public void testReadContentNoSegmentInfos() throws IOException {
         Map<String, String> expectedOutput = getDummyData();
         indexOutput.writeMapOfStrings(expectedOutput);
         indexOutput.writeLong(1234);
+        indexOutput.writeLong(1234);
         indexOutput.writeLong(0);
         indexOutput.writeBytes(new byte[0], 0);
         indexOutput.close();
@@ -77,6 +78,7 @@ public void testReadContentWithSegmentInfos() throws IOException {
         Map<String, String> expectedOutput = getDummyData();
         indexOutput.writeMapOfStrings(expectedOutput);
         indexOutput.writeLong(1234);
+        indexOutput.writeLong(1234);
         ByteBuffersIndexOutput segmentInfosOutput = new ByteBuffersIndexOutput(new ByteBuffersDataOutput(), "test", "resource");
         segmentInfos.write(segmentInfosOutput);
         byte[] segmentInfosBytes = segmentInfosOutput.toArrayCopy();
@@ -103,6 +105,7 @@ public void testWriteContent() throws IOException {
         RemoteSegmentMetadata remoteSegmentMetadata = new RemoteSegmentMetadata(
             RemoteSegmentMetadata.fromMapOfStrings(expectedOutput),
             segmentInfosBytes,
+            1234,
             1234
         );
         remoteSegmentMetadataHandler.writeContent(indexOutput, remoteSegmentMetadata);
@@ -113,6 +116,7 @@ public void testWriteContent() throws IOException {
         );
         assertEquals(expectedOutput, metadata.toMapOfStrings());
         assertEquals(1234, metadata.getGeneration());
+        assertEquals(1234, metadata.getPrimaryTerm());
         assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes());
     }
diff --git a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java
index b299ac4d66996..76301acac0c19 100644
--- a/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java
+++ b/server/src/test/java/org/opensearch/ingest/CompoundProcessorTests.java
@@ -33,6 +33,7 @@
 package org.opensearch.ingest;
 
 import org.opensearch.OpenSearchException;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.test.OpenSearchTestCase;
 import org.junit.Before;
 
@@ -433,10 +434,10 @@ private void assertStats(CompoundProcessor compoundProcessor, long count, long f
     }
 
     private void assertStats(int processor, CompoundProcessor compoundProcessor, long current, long count, long failed, long time) {
-        IngestStats.Stats stats = compoundProcessor.getProcessorsWithMetrics().get(processor).v2().createStats();
-        assertThat(stats.getIngestCount(), equalTo(count));
-        assertThat(stats.getIngestCurrent(), equalTo(current));
-        assertThat(stats.getIngestFailedCount(), equalTo(failed));
-        assertThat(stats.getIngestTimeInMillis(), equalTo(time));
+        OperationStats stats = compoundProcessor.getProcessorsWithMetrics().get(processor).v2().createStats();
+        assertThat(stats.getCount(), equalTo(count));
+        assertThat(stats.getCurrent(), equalTo(current));
+        assertThat(stats.getFailedCount(), equalTo(failed));
+        assertThat(stats.getTotalTimeInMillis(), equalTo(time));
     }
 }
diff --git a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java
index a383ab9b97918..921ac10c02862 100644
--- a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java
+++ b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java
@@ -32,6 +32,7 @@
 
 package org.opensearch.ingest;
 
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.script.IngestConditionalScript;
 import org.opensearch.script.MockScriptEngine;
@@ -250,10 +251,10 @@ private static void assertMutatingCtxThrows(Consumer<Map<String, Object>> mutati
     }
 
     private static void assertStats(ConditionalProcessor conditionalProcessor, long count, long failed, long time) {
-        IngestStats.Stats stats = conditionalProcessor.getMetric().createStats();
-        assertThat(stats.getIngestCount(), equalTo(count));
-        assertThat(stats.getIngestCurrent(), equalTo(0L));
-        assertThat(stats.getIngestFailedCount(), equalTo(failed));
-        assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time));
+        OperationStats stats = conditionalProcessor.getMetric().createStats();
+        assertThat(stats.getCount(), equalTo(count));
+        assertThat(stats.getCurrent(), equalTo(0L));
+        assertThat(stats.getFailedCount(), equalTo(failed));
+        assertThat(stats.getTotalTimeInMillis(), greaterThanOrEqualTo(time));
     }
 }
diff --git a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
index 4176a32e32ad3..19fef468c529e 100644
--- a/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
+++ b/server/src/test/java/org/opensearch/ingest/IngestServiceTests.java
@@ -58,6 +58,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.SetOnce;
 import org.opensearch.common.bytes.BytesArray;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.core.xcontent.XContentBuilder;
@@ -1739,14 +1740,14 @@ private void assertPipelineStats(List<IngestStats.PipelineStat> pipelineStats, S
         assertStats(getPipelineStats(pipelineStats, pipelineId), count, failed, time);
     }
 
-    private void assertStats(IngestStats.Stats stats, long count, long failed, long time) {
-        assertThat(stats.getIngestCount(), equalTo(count));
-        assertThat(stats.getIngestCurrent(), equalTo(0L));
-        assertThat(stats.getIngestFailedCount(), equalTo(failed));
-        assertThat(stats.getIngestTimeInMillis(), greaterThanOrEqualTo(time));
+    private void assertStats(OperationStats stats, long count, long failed, long time) {
+        assertThat(stats.getCount(), equalTo(count));
+        assertThat(stats.getCurrent(), equalTo(0L));
+        assertThat(stats.getFailedCount(), equalTo(failed));
+        assertThat(stats.getTotalTimeInMillis(), greaterThanOrEqualTo(time));
     }
 
-    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
+    private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
         return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
     }
 }
diff --git a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java
index b5c74f0ee5d16..b17e24ee5424d 100644
--- a/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java
+++ b/server/src/test/java/org/opensearch/ingest/IngestStatsTests.java
@@ -35,6 +35,7 @@
 import org.opensearch.common.collect.MapBuilder;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.test.OpenSearchTestCase;
 
 import java.io.IOException;
@@ -48,7 +49,7 @@ public class IngestStatsTests extends OpenSearchTestCase {
 
     public void testSerialization() throws IOException {
-        IngestStats.Stats totalStats = new IngestStats.Stats(50, 100, 200, 300);
+        OperationStats totalStats = new OperationStats(50, 100, 200, 300);
         List<IngestStats.PipelineStat> pipelineStats = createPipelineStats();
         Map<String, List<IngestStats.ProcessorStat>> processorStats = createProcessorStats(pipelineStats);
         IngestStats ingestStats = new IngestStats(totalStats, pipelineStats, processorStats);
@@ -57,20 +58,20 @@ public class IngestStatsTests extends OpenSearchTestCase {
     }
 
     private List<IngestStats.PipelineStat> createPipelineStats() {
-        IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new IngestStats.Stats(3, 3, 3, 3));
-        IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new IngestStats.Stats(47, 97, 197, 297));
-        IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat("pipeline3", new IngestStats.Stats(0, 0, 0, 0));
+        IngestStats.PipelineStat pipeline1Stats = new IngestStats.PipelineStat("pipeline1", new OperationStats(3, 3, 3, 3));
+        IngestStats.PipelineStat pipeline2Stats = new IngestStats.PipelineStat("pipeline2", new OperationStats(47, 97, 197, 297));
+        IngestStats.PipelineStat pipeline3Stats = new IngestStats.PipelineStat("pipeline3", new OperationStats(0, 0, 0, 0));
         return Stream.of(pipeline1Stats, pipeline2Stats, pipeline3Stats).collect(Collectors.toList());
     }
 
     private Map<String, List<IngestStats.ProcessorStat>> createProcessorStats(List<IngestStats.PipelineStat> pipelineStats) {
         assert (pipelineStats.size() >= 2);
-        IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", "type", new IngestStats.Stats(1, 1, 1, 1));
-        IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", "type", new IngestStats.Stats(2, 2, 2, 2));
+        IngestStats.ProcessorStat processor1Stat = new IngestStats.ProcessorStat("processor1", "type", new OperationStats(1, 1, 1, 1));
+        IngestStats.ProcessorStat processor2Stat = new IngestStats.ProcessorStat("processor2", "type", new OperationStats(2, 2, 2, 2));
         IngestStats.ProcessorStat processor3Stat = new IngestStats.ProcessorStat(
             "processor3",
             "type",
-            new IngestStats.Stats(47, 97, 197, 297)
+            new OperationStats(47, 97, 197, 297)
         );
         // pipeline1 -> processor1,processor2; pipeline2 -> processor3
         return MapBuilder.<String, List<IngestStats.ProcessorStat>>newMapBuilder()
@@ -132,14 +133,14 @@ private void assertIngestStats(
 
     }
 
-    private void assertStats(IngestStats.Stats fromObject, IngestStats.Stats fromStream) {
-        assertEquals(fromObject.getIngestCount(), fromStream.getIngestCount());
-        assertEquals(fromObject.getIngestFailedCount(), fromStream.getIngestFailedCount());
-        assertEquals(fromObject.getIngestTimeInMillis(), fromStream.getIngestTimeInMillis());
-        assertEquals(fromObject.getIngestCurrent(), fromStream.getIngestCurrent());
+    private void assertStats(OperationStats fromObject, OperationStats fromStream) {
+        assertEquals(fromObject.getCount(), fromStream.getCount());
+        assertEquals(fromObject.getFailedCount(), fromStream.getFailedCount());
+        assertEquals(fromObject.getTotalTimeInMillis(), fromStream.getTotalTimeInMillis());
+        assertEquals(fromObject.getCurrent(), fromStream.getCurrent());
     }
 
-    private IngestStats.Stats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
+    private OperationStats getPipelineStats(List<IngestStats.PipelineStat> pipelineStats, String id) {
         return pipelineStats.stream().filter(p1 -> p1.getPipelineId().equals(id)).findFirst().map(p2 -> p2.getStats()).orElse(null);
     }
 }
diff --git a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java
index 9f8dda15eeb65..3708b5bc32955 100644
--- a/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java
+++ b/server/src/test/java/org/opensearch/ingest/PipelineProcessorTests.java
@@ -32,6 +32,7 @@
 package org.opensearch.ingest;
 
 import org.opensearch.OpenSearchException;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.script.ScriptService;
 import org.opensearch.script.TemplateScript;
 import org.opensearch.test.OpenSearchTestCase;
@@ -192,29 +193,29 @@ public void testPipelineProcessorWithPipelineChain() throws Exception {
         assertNotNull(ingestDocument.getSourceAndMetadata().get(key1));
 
         // check the stats
-        IngestStats.Stats pipeline1Stats = pipeline1.getMetrics().createStats();
-        IngestStats.Stats pipeline2Stats = pipeline2.getMetrics().createStats();
-        IngestStats.Stats pipeline3Stats = pipeline3.getMetrics().createStats();
+        OperationStats pipeline1Stats = pipeline1.getMetrics().createStats();
+        OperationStats pipeline2Stats = pipeline2.getMetrics().createStats();
+        OperationStats pipeline3Stats = pipeline3.getMetrics().createStats();
 
         // current
-        assertThat(pipeline1Stats.getIngestCurrent(), equalTo(0L));
-        assertThat(pipeline2Stats.getIngestCurrent(), equalTo(0L));
-        assertThat(pipeline3Stats.getIngestCurrent(), equalTo(0L));
+        assertThat(pipeline1Stats.getCurrent(), equalTo(0L));
+        assertThat(pipeline2Stats.getCurrent(), equalTo(0L));
+        assertThat(pipeline3Stats.getCurrent(), equalTo(0L));
 
         // count
-        assertThat(pipeline1Stats.getIngestCount(), equalTo(1L));
-        assertThat(pipeline2Stats.getIngestCount(), equalTo(1L));
-        assertThat(pipeline3Stats.getIngestCount(), equalTo(1L));
+        assertThat(pipeline1Stats.getCount(), equalTo(1L));
+        assertThat(pipeline2Stats.getCount(), equalTo(1L));
+        assertThat(pipeline3Stats.getCount(), equalTo(1L));
 
         // time
-        assertThat(pipeline1Stats.getIngestTimeInMillis(), equalTo(0L));
-        assertThat(pipeline2Stats.getIngestTimeInMillis(), equalTo(3L));
-        assertThat(pipeline3Stats.getIngestTimeInMillis(), equalTo(2L));
+        assertThat(pipeline1Stats.getTotalTimeInMillis(), equalTo(0L));
+        assertThat(pipeline2Stats.getTotalTimeInMillis(), equalTo(3L));
+        assertThat(pipeline3Stats.getTotalTimeInMillis(), equalTo(2L));
 
         // failure
-        assertThat(pipeline1Stats.getIngestFailedCount(), equalTo(0L));
-        assertThat(pipeline2Stats.getIngestFailedCount(), equalTo(0L));
-        assertThat(pipeline3Stats.getIngestFailedCount(), equalTo(1L));
+        assertThat(pipeline1Stats.getFailedCount(), equalTo(0L));
+        assertThat(pipeline2Stats.getFailedCount(), equalTo(0L));
+        assertThat(pipeline3Stats.getFailedCount(), equalTo(1L));
     }
 
     public void testIngestPipelineMetadata() {
diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
index 43d371bf5a187..f5295bead19a4 100644
--- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
+++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java
@@ -58,6 +58,7 @@
 import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.shard.ShardId;
 import org.opensearch.index.snapshots.IndexShardSnapshotStatus;
+import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot;
 import org.opensearch.index.store.Store;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.recovery.RecoveryState;
@@ -342,6 +343,15 @@ public void restoreShard(
 
     }
 
+    @Override
+    public RemoteStoreShardShallowCopySnapshot getRemoteStoreShallowCopyShardMetadata(
+        SnapshotId snapshotId,
+        IndexId indexId,
+        ShardId snapshotShardId
+    ) {
+        return null;
+    }
+
     @Override
     public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, IndexId indexId, ShardId shardId) {
         return null;
diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
index 8e0ee6b16ed48..105ccef500ce8 100644
--- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
+++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -50,7 +50,12 @@
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.env.Environment;
 import org.opensearch.index.IndexModule;
+import org.opensearch.index.IndexService;
+import org.opensearch.index.IndexSettings;
+import org.opensearch.index.shard.ShardId;
+import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot;
 import org.opensearch.index.store.RemoteBufferedOutputDirectory;
+import org.opensearch.indices.IndicesService;
 import org.opensearch.indices.recovery.RecoverySettings;
 import org.opensearch.indices.replication.common.ReplicationType;
 import org.opensearch.plugins.Plugin;
@@ -117,11 +122,7 @@ protected void assertSnapshotOrGenericThread() {
 
     @Override
     protected Settings nodeSettings() {
-        return Settings.builder()
-            .put(super.nodeSettings())
-            .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true")
-            .put(FeatureFlags.REMOTE_STORE, "true")
-            .build();
+        return Settings.builder().put(super.nodeSettings()).put(FeatureFlags.REMOTE_STORE, "true").build();
     }
 
     public void testRetrieveSnapshots() throws Exception {
@@ -326,12 +327,89 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException {
         final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
         final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName);
-        List<SnapshotId> snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository)
-            .getSnapshotIds()
+        RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository);
+        IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName);
+
+        List<SnapshotId> snapshotIds = repositoryData.getSnapshotIds()
             .stream()
             .sorted((s1, s2) -> s1.getName().compareTo(s2.getName()))
             .collect(Collectors.toList());
         assertThat(snapshotIds, equalTo(originalSnapshots));
+
+        // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata
+        RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata(
+            snapshotId2,
+            indexId,
+            new ShardId(remoteStoreIndexName, indexId.getId(), 0)
+        );
+        assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName);
+    }
+
+    public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException {
+        FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE);
+        final Client client = client();
+        final String snapshotRepositoryName = "test-repo";
+        final String remoteStoreRepositoryName = "test-rs-repo";
+
+        logger.info("--> creating snapshot repository");
+
+        Settings snapshotRepoSettings = Settings.builder()
+            .put(node().settings())
+            .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings()))
+            .build();
+        createRepository(client, snapshotRepositoryName, snapshotRepoSettings);
+
+        logger.info("--> creating remote store repository");
+        Settings remoteStoreRepoSettings = Settings.builder()
+            .put(node().settings())
+            .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings()))
+            .build();
+        createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings);
+
+        logger.info("--> creating a remote store enabled index and indexing documents");
+        final String remoteStoreIndexName = "test-rs-idx";
+        Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName);
+        createIndex(remoteStoreIndexName, indexSettings);
+        indexDocuments(client, remoteStoreIndexName);
+
+        logger.info("--> create remote index shallow snapshot");
+        Settings snapshotRepoSettingsForShallowCopy = Settings.builder()
+            .put(snapshotRepoSettings)
+            .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE)
+            .build();
+        updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy);
+
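+        // Take the shallow-copy snapshot; the lock file it leaves in the remote store is verified
+        // right below.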
+        CreateSnapshotResponse createSnapshotResponse = client.admin()
+            .cluster()
+            .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2")
+            .setWaitForCompletion(true)
+            .setIndices(remoteStoreIndexName)
+            .get();
+        final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId();
+
+        String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName);
+        assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles);
+        assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock");
+
+        final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
+        final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName);
+        RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository);
+        IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName);
+        IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName);
+        RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata(
+            snapshotId,
+            indexId,
+            new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0)
+        );
+        assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName);
+        assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID());
+        assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), "");
+    }
+
+    private IndexSettings getIndexSettings(String indexName) {
+        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
+        final IndexService indexService = indicesService.indexService(resolveIndex(indexName));
+        return indexService.getIndexSettings();
     }
 
     // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot
diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
index d49d9fd41031c..219dddff40b35 100644
--- a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
+++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java
@@ -30,6 +30,7 @@
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.bytes.BytesArray;
 import org.opensearch.common.io.stream.NamedWriteableRegistry;
+import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.xcontent.XContentType;
@@ -786,4 +787,126 @@ public void testExceptionOnResponseProcessing() throws Exception {
         // Exception thrown when processing response
         expectThrows(SearchPipelineProcessingException.class, () -> pipelinedRequest.transformResponse(response));
     }
+
+    public void testStats() throws Exception {
+        SearchRequestProcessor throwingRequestProcessor = new FakeRequestProcessor("throwing_request", "1", null, r -> {
+            throw new RuntimeException();
+        });
+        Map<String, Processor.Factory<SearchRequestProcessor>> requestProcessors = Map.of(
+            "successful_request",
+            (pf, t, f, c) -> new FakeRequestProcessor("successful_request", "2", null, r -> {}),
+            "throwing_request",
+            (pf, t, f, c) -> throwingRequestProcessor
+        );
+        SearchResponseProcessor throwingResponseProcessor = new FakeResponseProcessor("throwing_response", "3", null, r -> {
+            throw new RuntimeException();
+        });
+        Map<String, Processor.Factory<SearchResponseProcessor>> responseProcessors = Map.of(
+            "successful_response",
+            (pf, t, f, c) -> new FakeResponseProcessor("successful_response", "4", null, r -> {}),
+            "throwing_response",
+            (pf, t, f, c) -> throwingResponseProcessor
+        );
+        SearchPipelineService searchPipelineService = createWithProcessors(requestProcessors, responseProcessors);
+
+        SearchPipelineMetadata metadata = new SearchPipelineMetadata(
+            Map.of(
+                "good_response_pipeline",
+                new PipelineConfiguration(
+                    "good_response_pipeline",
+                    new BytesArray("{\"response_processors\" : [ { \"successful_response\": {} } ] }"),
+                    XContentType.JSON
+                ),
+                "bad_response_pipeline",
+                new PipelineConfiguration(
+                    "bad_response_pipeline",
+                    new BytesArray("{\"response_processors\" : [ { \"throwing_response\": {} } ] }"),
+                    XContentType.JSON
+                ),
+                "good_request_pipeline",
+                new PipelineConfiguration(
+                    "good_request_pipeline",
+                    new BytesArray("{\"request_processors\" : [ { \"successful_request\": {} } ] }"),
+                    XContentType.JSON
+                ),
+                "bad_request_pipeline",
+                new PipelineConfiguration(
+                    "bad_request_pipeline",
+                    new BytesArray("{\"request_processors\" : [ { \"throwing_request\": {} } ] }"),
+                    XContentType.JSON
+                )
+            )
+        );
+        ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build();
+        ClusterState previousState = clusterState;
+        clusterState = ClusterState.builder(clusterState)
+            .metadata(Metadata.builder().putCustom(SearchPipelineMetadata.TYPE, metadata))
+            .build();
+        searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousState));
+
+        SearchRequest request = new SearchRequest();
+        SearchResponse response = new SearchResponse(null, null, 0, 0, 0, 0, null, null);
+
+        searchPipelineService.resolvePipeline(request.pipeline("good_request_pipeline")).transformResponse(response);
+        expectThrows(
+            SearchPipelineProcessingException.class,
+            () -> searchPipelineService.resolvePipeline(request.pipeline("bad_request_pipeline")).transformResponse(response)
+        );
+        searchPipelineService.resolvePipeline(request.pipeline("good_response_pipeline")).transformResponse(response);
+        expectThrows(
+            SearchPipelineProcessingException.class,
+            () -> searchPipelineService.resolvePipeline(request.pipeline("bad_response_pipeline")).transformResponse(response)
+        );
+
+        SearchPipelineStats stats = searchPipelineService.stats();
+        assertPipelineStats(stats.getTotalRequestStats(), 2, 1);
+        assertPipelineStats(stats.getTotalResponseStats(), 2, 1);
+        for (SearchPipelineStats.PerPipelineStats perPipelineStats : stats.getPipelineStats()) {
+            SearchPipelineStats.PipelineDetailStats detailStats = stats.getPerPipelineProcessorStats()
+                .get(perPipelineStats.getPipelineId());
+            switch (perPipelineStats.getPipelineId()) {
+                case "good_request_pipeline":
+                    assertPipelineStats(perPipelineStats.getRequestStats(), 1, 0);
+                    assertPipelineStats(perPipelineStats.getResponseStats(), 0, 0);
+                    assertEquals(1, detailStats.requestProcessorStats().size());
+                    assertEquals(0, detailStats.responseProcessorStats().size());
+                    assertEquals("successful_request:2", detailStats.requestProcessorStats().get(0).getProcessorName());
+                    assertEquals("successful_request", detailStats.requestProcessorStats().get(0).getProcessorType());
+                    assertPipelineStats(detailStats.requestProcessorStats().get(0).getStats(), 1, 0);
+                    break;
+                case "bad_request_pipeline":
+                    assertPipelineStats(perPipelineStats.getRequestStats(), 1, 1);
+                    assertPipelineStats(perPipelineStats.getResponseStats(), 0, 0);
+                    assertEquals(1, detailStats.requestProcessorStats().size());
+                    assertEquals(0, detailStats.responseProcessorStats().size());
+                    assertEquals("throwing_request:1", detailStats.requestProcessorStats().get(0).getProcessorName());
assertEquals("throwing_request", detailStats.requestProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.requestProcessorStats().get(0).getStats(), 1, 1); + break; + case "good_response_pipeline": + assertPipelineStats(perPipelineStats.getRequestStats(), 0, 0); + assertPipelineStats(perPipelineStats.getResponseStats(), 1, 0); + assertEquals(0, detailStats.requestProcessorStats().size()); + assertEquals(1, detailStats.responseProcessorStats().size()); + assertEquals("successful_response:4", detailStats.responseProcessorStats().get(0).getProcessorName()); + assertEquals("successful_response", detailStats.responseProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.responseProcessorStats().get(0).getStats(), 1, 0); + break; + case "bad_response_pipeline": + assertPipelineStats(perPipelineStats.getRequestStats(), 0, 0); + assertPipelineStats(perPipelineStats.getResponseStats(), 1, 1); + assertEquals(0, detailStats.requestProcessorStats().size()); + assertEquals(1, detailStats.responseProcessorStats().size()); + assertEquals("throwing_response:3", detailStats.responseProcessorStats().get(0).getProcessorName()); + assertEquals("throwing_response", detailStats.responseProcessorStats().get(0).getProcessorType()); + assertPipelineStats(detailStats.responseProcessorStats().get(0).getStats(), 1, 1); + break; + } + } + } + + private static void assertPipelineStats(OperationStats stats, long count, long failed) { + assertEquals(stats.getCount(), count); + assertEquals(stats.getFailedCount(), failed); + } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java new file mode 100644 index 0000000000000..dac41f0db4e00 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineStatsTests.java @@ -0,0 +1,185 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pipeline; + +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.metrics.OperationStats; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.MediaType; +import org.opensearch.core.xcontent.XContentBuilder; +import org.opensearch.core.xcontent.XContentParser; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class SearchPipelineStatsTests extends OpenSearchTestCase { + public void testSerializationRoundtrip() throws IOException { + SearchPipelineStats stats = createStats(); + SearchPipelineStats deserialized; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + stats.writeTo(bytesStreamOutput); + try (StreamInput bytesStreamInput = bytesStreamOutput.bytes().streamInput()) { + deserialized = new SearchPipelineStats(bytesStreamInput); + } + } + assertEquals(stats, deserialized); + } + + private static SearchPipelineStats createStats() { + return new SearchPipelineStats( + new OperationStats(1, 2, 3, 4), + new OperationStats(5, 6, 7, 8), + List.of( + new SearchPipelineStats.PerPipelineStats("p1", new OperationStats(9, 10, 11, 12), new OperationStats(13, 14, 15, 16)), + new SearchPipelineStats.PerPipelineStats("p2", new OperationStats(17, 18, 19, 20), new OperationStats(21, 22, 23, 24)) + + ), + Map.of( + "p1", + new SearchPipelineStats.PipelineDetailStats( + List.of(new SearchPipelineStats.ProcessorStats("req1:a", "req1", new OperationStats(25, 26, 27, 28))), + List.of(new SearchPipelineStats.ProcessorStats("rsp1:a", "rsp1", new OperationStats(29, 30, 31, 32))) + ), + "p2", + new SearchPipelineStats.PipelineDetailStats( + List.of( + new SearchPipelineStats.ProcessorStats("req1:a", "req1", new OperationStats(33, 34, 35, 36)), + new SearchPipelineStats.ProcessorStats("req2", "req2", new OperationStats(37, 38, 39, 40)) + ), + List.of() + ) + ) + ); + } + + public void testToXContent() throws IOException { + XContentBuilder actualBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + actualBuilder.startObject(); + createStats().toXContent(actualBuilder, null); + actualBuilder.endObject(); + + String expected = "{" + + " \"search_pipeline\" : {" + + " \"total_request\" : {" + + " \"count\" : 1," + + " \"time_in_millis\" : 2," + + " \"current\" : 3," + + " \"failed\" : 4" + + " }," + + " \"total_response\" : {" + + " \"count\" : 5," + + " \"time_in_millis\" : 6," + + " \"current\" : 7," + + " \"failed\" : 8" + + " }," + + " \"pipelines\" : {" + + " \"p1\" : {" + + " \"request\" : {" + + " \"count\" : 9," + + " \"time_in_millis\" : 10," + + " \"current\" : 11," + + " \"failed\" : 12" + + " }," + + " \"response\" : {" + + " \"count\" : 13," + + " \"time_in_millis\" : 14," + + " \"current\" : 15," + + " \"failed\" : 16" + + " }," + + " \"request_processors\" : [" + + " {" + + " \"req1:a\" : {" + + " \"type\" : \"req1\"," + + " \"stats\" : {" + + " \"count\" : 25," + + " \"time_in_millis\" : 26," + + " \"current\" : 27," + + " \"failed\" : 28" + + " }" + + " }" + + " }" + + " ]," + + " \"response_processors\" : [" + + " {" + + " \"rsp1:a\" : {" + + " \"type\" : \"rsp1\"," + + " \"stats\" : {" + + " \"count\" : 
29," + + " \"time_in_millis\" : 30," + + " \"current\" : 31," + + " \"failed\" : 32" + + " }" + + " }" + + " }" + + " ]" + + " }," + + " \"p2\" : {" + + " \"request\" : {" + + " \"count\" : 17," + + " \"time_in_millis\" : 18," + + " \"current\" : 19," + + " \"failed\" : 20" + + " }," + + " \"response\" : {" + + " \"count\" : 21," + + " \"time_in_millis\" : 22," + + " \"current\" : 23," + + " \"failed\" : 24" + + " }," + + " \"request_processors\" : [" + + " {" + + " \"req1:a\" : {" + + " \"type\" : \"req1\"," + + " \"stats\" : {" + + " \"count\" : 33," + + " \"time_in_millis\" : 34," + + " \"current\" : 35," + + " \"failed\" : 36" + + " }" + + " }" + + " }," + + " {" + + " \"req2\" : {" + + " \"type\" : \"req2\"," + + " \"stats\" : {" + + " \"count\" : 37," + + " \"time_in_millis\" : 38," + + " \"current\" : 39," + + " \"failed\" : 40" + + " }" + + " }" + + " }" + + " ]," + + " \"response_processors\" : [ ]" + + " }" + + " }" + + " }" + + "}"; + + XContentParser expectedParser = JsonXContent.jsonXContent.createParser( + this.xContentRegistry(), + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + expected + ); + XContentBuilder expectedBuilder = XContentBuilder.builder(JsonXContent.jsonXContent); + expectedBuilder.generator().copyCurrentStructure(expectedParser); + + assertEquals( + XContentHelper.convertToMap(BytesReference.bytes(expectedBuilder), false, (MediaType) XContentType.JSON), + XContentHelper.convertToMap(BytesReference.bytes(actualBuilder), false, (MediaType) XContentType.JSON) + ); + } +} diff --git a/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java b/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java new file mode 100644 index 0000000000000..45344ab4253f7 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/TelemetryModuleTests.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry; + +import org.opensearch.plugins.TelemetryPlugin; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.List; +import java.util.Optional; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TelemetryModuleTests extends OpenSearchTestCase { + + public void testGetTelemetrySuccess() { + TelemetryPlugin telemetryPlugin = mock(TelemetryPlugin.class); + when(telemetryPlugin.getName()).thenReturn("otel"); + Telemetry mockTelemetry = mock(Telemetry.class); + when(telemetryPlugin.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry)); + List<TelemetryPlugin> telemetryPlugins = List.of(telemetryPlugin); + + TelemetryModule telemetryModule = new TelemetryModule(telemetryPlugins, any()); + + assertTrue(telemetryModule.getTelemetry().isPresent()); + assertEquals(mockTelemetry, telemetryModule.getTelemetry().get()); + } + + public void testGetTelemetryWithMultipleInstalledPlugins() { + TelemetryPlugin telemetryPlugin1 = mock(TelemetryPlugin.class); + TelemetryPlugin telemetryPlugin2 = mock(TelemetryPlugin.class); + when(telemetryPlugin1.getName()).thenReturn("otel"); + Telemetry mockTelemetry1 = mock(Telemetry.class); + Telemetry mockTelemetry2 = mock(Telemetry.class); + + when(telemetryPlugin1.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry1)); + when(telemetryPlugin2.getTelemetry(any())).thenReturn(Optional.of(mockTelemetry2)); + + List<TelemetryPlugin> telemetryPlugins = List.of(telemetryPlugin1, telemetryPlugin2); + + try { + new TelemetryModule(telemetryPlugins, any()); + fail("expected TelemetryModule to reject more than one telemetry plugin"); + } catch (Exception e) { + assertEquals("Cannot register more than one telemetry", e.getMessage()); + } + } + + public void testGetTelemetryWithNoPlugins() { + TelemetryPlugin telemetryPlugin = mock(TelemetryPlugin.class); + when(telemetryPlugin.getName()).thenReturn("otel"); + TelemetryModule telemetryModule = new TelemetryModule(List.of(telemetryPlugin), any()); + + assertFalse(telemetryModule.getTelemetry().isPresent()); + } +}
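For orientation, a minimal sketch of the contract these three tests pin down: one plugin that supplies a Telemetry registers it, a second supplier must be rejected with exactly the message asserted above, and a plugin that supplies nothing leaves the module empty. The sketch is inferred from the tests alone, not the real TelemetryModule source; the constructor signature, field layout, and exception type are assumptions.

public class TelemetryModule {
    private Telemetry telemetry;

    public TelemetryModule(List<TelemetryPlugin> telemetryPlugins, TelemetrySettings telemetrySettings) {
        for (TelemetryPlugin plugin : telemetryPlugins) {
            Optional<Telemetry> candidate = plugin.getTelemetry(telemetrySettings);
            if (candidate.isPresent()) {
                if (telemetry != null) {
                    // Message must match the assertion in testGetTelemetryWithMultipleInstalledPlugins.
                    throw new IllegalStateException("Cannot register more than one telemetry");
                }
                telemetry = candidate.get();
            }
        }
    }

    public Optional<Telemetry> getTelemetry() {
        return Optional.ofNullable(telemetry);
    }
}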
diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java new file mode 100644 index 0000000000000..7968c6c43afb4 --- /dev/null +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.telemetry.tracing; + +import org.junit.After; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.telemetry.Telemetry; +import org.opensearch.telemetry.TelemetrySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.telemetry.tracing.noop.NoopTracer; + +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class TracerFactoryTests extends OpenSearchTestCase { + + private TracerFactory tracerFactory; + + @After + public void close() { + tracerFactory.close(); + } + + public void testGetTracerWithTracingDisabledReturnsNoopTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), false).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof NoopTracer); + assertTrue(tracer.startSpan("foo") == Scope.NO_OP); + } + + public void testGetTracerWithTracingEnabledReturnsDefaultTracer() { + Settings settings = Settings.builder().put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true).build(); + TelemetrySettings telemetrySettings = new TelemetrySettings(settings, new ClusterSettings(settings, getClusterSettings())); + Telemetry mockTelemetry = mock(Telemetry.class); + when(mockTelemetry.getTracingTelemetry()).thenReturn(mock(TracingTelemetry.class)); + tracerFactory = new TracerFactory(telemetrySettings, Optional.of(mockTelemetry), new ThreadContext(Settings.EMPTY)); + + Tracer tracer = tracerFactory.getTracer(); + assertTrue(tracer instanceof DefaultTracer); + } + + private Set<Setting<?>> getClusterSettings() { + Set<Setting<?>> allTracerSettings = new HashSet<>(); + ClusterSettings.FEATURE_FLAGGED_CLUSTER_SETTINGS.get(List.of(FeatureFlags.TELEMETRY)).stream().forEach(allTracerSettings::add); + return allTracerSettings; + } +}
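The two tests above fix TracerFactory's gating behavior: a disabled tracer setting must yield the shared no-op tracer, never null, so instrumented call sites need no availability checks. A minimal sketch of that decision follows, with assumed names (isTracingEnabled, a DefaultTracer constructor taking the tracing telemetry and a context storage); the real factory internals may differ.

Tracer getTracer() {
    // NoopTracer spans all report Scope.NO_OP, which the disabled-path test asserts.
    if (telemetry.isEmpty() || telemetrySettings.isTracingEnabled() == false) {
        return NoopTracer.INSTANCE;
    }
    return new DefaultTracer(telemetry.get().getTracingTelemetry(), contextStorage);
}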
diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index de6f69a4fd4ce..310b088674d5c 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -51,7 +51,7 @@ dependencies { api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" api 'com.google.code.gson:gson:2.10.1' - api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" + api "org.bouncycastle:bcpkix-jdk15to18:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java index cf5f6613c3ea1..6634d1b4dbafc 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java @@ -119,7 +119,8 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) { nodeStats.getClusterManagerThrottlingStats(), nodeStats.getWeightedRoutingStats(), nodeStats.getFileCacheStats(), - nodeStats.getTaskCancellationStats() + nodeStats.getTaskCancellationStats(), + nodeStats.getSearchPipelineStats() ); }).collect(Collectors.toList()); } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 659f473403ec8..ea9e9342673db 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -67,6 +67,7 @@ import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.lucene.uid.Versions; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; @@ -101,6 +102,9 @@ import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; import org.opensearch.index.translog.InternalTranslogFactory; import org.opensearch.index.translog.RemoteBlobStoreInternalTranslogFactory; import org.opensearch.index.translog.Translog; @@ -574,7 +578,14 @@ protected IndexShard newShard( RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService = null; if (indexSettings.isRemoteStoreEnabled()) { if (remoteStore == null) { - remoteStore = createRemoteStore(createTempDir(), routing, indexMetadata); + Path remoteStorePath; + String remoteStoreRepository = indexSettings.getRemoteStoreRepository(); + if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) { + remoteStorePath = PathUtils.get(remoteStoreRepository.replace("__test", "")); + } else { + remoteStorePath = createTempDir(); + } + remoteStore = createRemoteStore(remoteStorePath, routing, indexMetadata); } remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService(clusterService, indexSettings.getSettings()); } @@ -642,21 +653,30 @@ protected RepositoriesService createRepositoriesService() { protected Store createRemoteStore(Path path, ShardRouting shardRouting, IndexMetadata metadata) throws IOException { Settings nodeSettings = Settings.builder().put("node.name", shardRouting.currentNodeId()).build(); + ShardId shardId = shardRouting.shardId(); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = createRemoteSegmentStoreDirectory(shardId, path); + return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + } - ShardId shardId = new ShardId("index", "_na_", 0); + protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId shardId, Path path) throws IOException { NodeEnvironment.NodePath remoteNodePath = new NodeEnvironment.NodePath(path);
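+ // Both RemoteDirectory instances below resolve under the shard's index path; the lock manager writes through its own RemoteBufferedOutputDirectory over the same blob container, so lock files sit beside the segment metadata.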
ShardPath remoteShardPath = new ShardPath(false, remoteNodePath.resolve(shardId), remoteNodePath.resolve(shardId), shardId); RemoteDirectory dataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); RemoteDirectory metadataDirectory = newRemoteDirectory(remoteShardPath.resolveIndex()); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, null); - return createStore(shardId, new IndexSettings(metadata, nodeSettings), remoteSegmentStoreDirectory); + RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( + new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) + ); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { + return new RemoteDirectory(getBlobContainer(f)); + } + + protected BlobContainer getBlobContainer(Path f) throws IOException { FsBlobStore fsBlobStore = new FsBlobStore(1024, f, false); BlobPath blobPath = new BlobPath(); - BlobContainer fsBlobContainer = new FsBlobContainer(fsBlobStore, blobPath, f); - return new RemoteDirectory(fsBlobContainer); + return new FsBlobContainer(fsBlobStore, blobPath, f); } /** diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java index 28660ba834a65..ad515f2405f1d 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/BlobStoreTestUtil.java @@ -240,15 +240,18 @@ private static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito final BlobContainer repoRoot = repository.blobContainer(); final Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds(); final List<String> expectedSnapshotUUIDs = snapshotIds.stream().map(SnapshotId::getUUID).collect(Collectors.toList()); - for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.METADATA_PREFIX }) { - final Collection<String> foundSnapshotUUIDs = repoRoot.listBlobs() - .keySet() - .stream() - .filter(p -> p.startsWith(prefix)) - .map(p -> p.replace(prefix, "").replace(".dat", "")) - .collect(Collectors.toSet()); - assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY))); + Collection<String> foundSnapshotUUIDs = new HashSet<>(); + for (String prefix : new String[] { BlobStoreRepository.SNAPSHOT_PREFIX, BlobStoreRepository.SHALLOW_SNAPSHOT_PREFIX }) { + foundSnapshotUUIDs.addAll( + repoRoot.listBlobs() + .keySet() + .stream() + .filter(p -> p.startsWith(prefix)) + .map(p -> p.replace(prefix, "").replace(".dat", "")) + .collect(Collectors.toSet()) + ); } + assertThat(foundSnapshotUUIDs, containsInAnyOrder(expectedSnapshotUUIDs.toArray(Strings.EMPTY_ARRAY))); final BlobContainer indicesContainer = repository.getBlobContainer().children().get("indices"); final Map<String, BlobContainer> indices; @@ -303,10 +306,16 @@ private static void assertSnapshotUUIDs(BlobStoreRepository repository, Reposito .stream() .noneMatch(shardFailure -> shardFailure.index().equals(index) && shardFailure.shardId() == shardId)) { final Map<String, BlobMetadata> shardPathContents = shardContainer.listBlobs(); - assertThat( - shardPathContents, - hasKey(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID())) + +
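// A shard-level snapshot blob may now be either a full snapshot ("snap-<uuid>.dat", SNAPSHOT_NAME_FORMAT) or a shallow copy ("shallow-snap-<uuid>.dat", SHALLOW_SNAPSHOT_NAME_FORMAT), so the rewritten assertion accepts either name. +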
assertTrue( + shardPathContents.containsKey( + String.format(Locale.ROOT, BlobStoreRepository.SHALLOW_SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()) + ) + || shardPathContents.containsKey( + String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotId.getUUID()) + ) ); + assertThat( shardPathContents.keySet() .stream() diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 9933297aa1c96..ddf9f3e96b9b4 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -105,6 +105,7 @@ public abstract class AbstractSnapshotIntegTestCase extends OpenSearchIntegTestCase { + protected static final String TEST_REMOTE_STORE_REPO_SUFFIX = "__rs"; private static final String OLD_VERSION_SNAPSHOT_PREFIX = "old-version-snapshot-"; // Large snapshot pool settings to set up nodes for tests involving multiple repositories that need to have enough @@ -148,14 +149,19 @@ public void verifyNoLeakedListeners() throws Exception { @After public void assertRepoConsistency() { if (skipRepoConsistencyCheckReason == null) { - clusterAdmin().prepareGetRepositories().get().repositories().forEach(repositoryMetadata -> { - final String name = repositoryMetadata.name(); - if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) { - clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); - clusterAdmin().prepareCleanupRepository(name).get(); - } - BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); - }); + clusterAdmin().prepareGetRepositories() + .get() + .repositories() + .stream() + .filter(repositoryMetadata -> !repositoryMetadata.name().endsWith(TEST_REMOTE_STORE_REPO_SUFFIX)) + .forEach(repositoryMetadata -> { + final String name = repositoryMetadata.name(); + if (repositoryMetadata.settings().getAsBoolean("readonly", false) == false) { + clusterAdmin().prepareDeleteSnapshot(name, OLD_VERSION_SNAPSHOT_PREFIX + "*").get(); + clusterAdmin().prepareCleanupRepository(name).get(); + } + BlobStoreTestUtil.assertRepoConsistency(internalCluster(), name); + }); } else { logger.info("--> skipped repo consistency checks because [{}]", skipRepoConsistencyCheckReason); } @@ -367,10 +373,36 @@ protected void createRepository(String repoName, String type, Settings.Builder s assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); } + protected void updateRepository(String repoName, String type, Settings.Builder settings) { + logger.info("--> updating repository [{}] [{}]", repoName, type); + assertAcked(clusterAdmin().preparePutRepository(repoName).setType(type).setSettings(settings)); + } + protected void createRepository(String repoName, String type, Path location) { createRepository(repoName, type, Settings.builder().put("location", location)); } + protected Settings.Builder getRepositorySettings(Path location, boolean shallowCopyEnabled) { + Settings.Builder settingsBuilder = randomRepositorySettings(); + settingsBuilder.put("location", location); + if (shallowCopyEnabled) { + settingsBuilder.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true); + } + return settingsBuilder; + } + +
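// REMOTE_STORE_INDEX_SHALLOW_COPY marks a snapshot repository so that snapshots of remote-store-backed indices record references to segments already held in the remote store rather than copying segment files into the repository. +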
protected Settings.Builder getRepositorySettings(Path location, String basePath, boolean shallowCopyEnabled) { + Settings.Builder settingsBuilder = randomRepositorySettings(); + settingsBuilder.put("location", location); + if (shallowCopyEnabled) { + settingsBuilder.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true); + } + if (basePath != null) { + settingsBuilder.put("base_path", basePath); + } + return settingsBuilder; + } + protected void createRepository(String repoName, String type) { createRepository(repoName, type, randomRepositorySettings()); } diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 38617d09b703d..49d8b64bc71cd 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -2685,6 +2685,7 @@ public void ensureEstimatedStats() { false, false, false, + false, false ); assertThat(